# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
from os.path import join
import os.path
import shutil
import sys
from collections import defaultdict
from setuptools import Extension
from setuptools.dep_util import newer_group
import numpy
from extension_helpers import import_file, write_if_different, get_compiler, pkg_config
WCSROOT = os.path.relpath(os.path.dirname(__file__))
WCSVERSION = "7.11"
def b(s):
return s.encode('ascii')
def string_escape(s):
s = s.decode('ascii').encode('ascii', 'backslashreplace')
s = s.replace(b'\n', b'\\n')
s = s.replace(b'\0', b'\\0')
return s.decode('ascii')
def determine_64_bit_int():
"""
The only configuration parameter needed at compile-time is how to
specify a 64-bit signed integer. Python's ctypes module can get us
that information.
If we can't be absolutely certain, we default to "long long int",
which is correct on most platforms (x86, x86_64). If we find
platforms where this heuristic doesn't work, we may need to
hardcode for them.
"""
try:
try:
import ctypes
except ImportError:
raise ValueError()
if ctypes.sizeof(ctypes.c_longlong) == 8:
return "long long int"
elif ctypes.sizeof(ctypes.c_long) == 8:
return "long int"
elif ctypes.sizeof(ctypes.c_int) == 8:
return "int"
else:
raise ValueError()
except ValueError:
return "long long int"
def write_wcsconfig_h(paths):
"""
Writes out the wcsconfig.h header with local configuration.
"""
h_file = io.StringIO()
h_file.write("""
/* The bundled version has WCSLIB_VERSION */
#define HAVE_WCSLIB_VERSION 1
/* WCSLIB library version number. */
#define WCSLIB_VERSION {}
/* 64-bit integer data type. */
#define WCSLIB_INT64 {}
/* Windows needs some other defines to prevent inclusion of wcsset()
which conflicts with wcslib's wcsset(). These need to be set
on code that *uses* astropy.wcs, in addition to astropy.wcs itself.
*/
#if defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) || defined (__MINGW64__)
#ifndef YY_NO_UNISTD_H
#define YY_NO_UNISTD_H
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#ifndef _NO_OLDNAMES
#define _NO_OLDNAMES
#endif
#ifndef NO_OLDNAMES
#define NO_OLDNAMES
#endif
#ifndef __STDC__
#define __STDC__ 1
#endif
#endif
""".format(WCSVERSION, determine_64_bit_int()))
content = h_file.getvalue().encode('ascii')
for path in paths:
write_if_different(path, content)
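# Hypothetical usage sketch (the real paths are assembled in
# get_extensions() below); write_if_different() leaves each file untouched
# when its content is unchanged, avoiding needless recompilation:
#
#     >>> write_wcsconfig_h(['include/astropy_wcs/wcsconfig.h',
#     ...                    'include/wcsconfig.h'])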
######################################################################
# GENERATE DOCSTRINGS IN C
def generate_c_docstrings():
docstrings = import_file(os.path.join(WCSROOT, 'docstrings.py'))
docstrings = docstrings.__dict__
keys = [
key for key, val in docstrings.items()
if not key.startswith('__') and isinstance(val, str)]
keys.sort()
docs = {}
for key in keys:
docs[key] = docstrings[key].encode('utf8').lstrip() + b'\0'
h_file = io.StringIO()
h_file.write("""/*
DO NOT EDIT!
This file is autogenerated by astropy/wcs/setup_package.py. To edit
its contents, edit astropy/wcs/docstrings.py
*/
#ifndef __DOCSTRINGS_H__
#define __DOCSTRINGS_H__
""")
for key in keys:
val = docs[key]
h_file.write(f'extern char doc_{key}[{len(val)}];\n')
h_file.write("\n#endif\n\n")
write_if_different(
join(WCSROOT, 'include', 'astropy_wcs', 'docstrings.h'),
h_file.getvalue().encode('utf-8'))
c_file = io.StringIO()
c_file.write("""/*
DO NOT EDIT!
This file is autogenerated by astropy/wcs/setup_package.py. To edit
its contents, edit astropy/wcs/docstrings.py
The weirdness here with the byte-array initializers is because some C
compilers, notably MSVC, do not support string literals greater than 256
characters.
*/
#include <string.h>
#include "astropy_wcs/docstrings.h"
""")
for key in keys:
val = docs[key]
c_file.write(f'char doc_{key}[{len(val)}] = {{\n')
for i in range(0, len(val), 12):
section = val[i:i+12]
c_file.write(' ')
c_file.write(''.join(f'0x{x:02x}, ' for x in section))
c_file.write('\n')
c_file.write(" };\n\n")
write_if_different(
join(WCSROOT, 'src', 'docstrings.c'),
c_file.getvalue().encode('utf-8'))
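# For illustration, a sketch of the per-docstring transformation applied
# above (each docstring becomes an initialized char array rather than a
# string literal, sidestepping the compiler limit mentioned earlier):
#
#     >>> val = 'Example docstring'.encode('utf8').lstrip() + b'\0'
#     >>> ', '.join(f'0x{x:02x}' for x in val[:4])
#     '0x45, 0x78, 0x61, 0x6d'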
def get_wcslib_cfg(cfg, wcslib_files, include_paths):
debug = '--debug' in sys.argv
cfg['include_dirs'].append(numpy.get_include())
cfg['define_macros'].extend([
('ECHO', None),
('WCSTRIG_MACRO', None),
('ASTROPY_WCS_BUILD', None),
('_GNU_SOURCE', None)])
if ((int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0))
or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0)))
and not sys.platform == 'win32'):
wcsconfig_h_path = join(WCSROOT, 'include', 'wcsconfig.h')
if os.path.exists(wcsconfig_h_path):
os.unlink(wcsconfig_h_path)
for k, v in pkg_config(['wcslib'], ['wcs']).items():
cfg[k].extend(v)
else:
write_wcsconfig_h(include_paths)
wcslib_path = join("cextern", "wcslib") # Path to wcslib
wcslib_cpath = join(wcslib_path, "C") # Path to wcslib source files
cfg['sources'].extend(join(wcslib_cpath, x) for x in wcslib_files)
cfg['include_dirs'].append(wcslib_cpath)
if debug:
cfg['define_macros'].append(('DEBUG', None))
cfg['undef_macros'].append('NDEBUG')
if (not sys.platform.startswith('sun') and
not sys.platform == 'win32'):
cfg['extra_compile_args'].extend(["-fno-inline", "-O0", "-g"])
else:
# Define ECHO as nothing to prevent spurious newlines from
# printing within the libwcs parser
cfg['define_macros'].append(('NDEBUG', None))
cfg['undef_macros'].append('DEBUG')
if sys.platform == 'win32':
# These are written into wcsconfig.h, but that file is not
# used by all parts of wcslib.
cfg['define_macros'].extend([
('YY_NO_UNISTD_H', None),
('_CRT_SECURE_NO_WARNINGS', None),
('_NO_OLDNAMES', None), # for mingw32
('NO_OLDNAMES', None), # for mingw64
('__STDC__', None) # for MSVC
])
if sys.platform.startswith('linux'):
cfg['define_macros'].append(('HAVE_SINCOS', None))
# For 4.7+ enable C99 syntax in older compilers (need 'gnu99' std for gcc)
if get_compiler() == 'unix':
cfg['extra_compile_args'].extend(['-std=gnu99'])
# Squelch a few compilation warnings in WCSLIB
if get_compiler() in ('unix', 'mingw32'):
if not debug:
cfg['extra_compile_args'].extend([
'-Wno-strict-prototypes',
'-Wno-unused-function',
'-Wno-unused-value',
'-Wno-uninitialized'])
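# The system-vs-bundled choice above is driven purely by environment
# variables; a hedged sketch of opting in to a system wcslib before the
# build runs (on win32 the bundled copy is always used, per the condition
# above):
#
#     >>> import os
#     >>> os.environ['ASTROPY_USE_SYSTEM_WCSLIB'] = '1'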
def get_extensions():
generate_c_docstrings()
######################################################################
# DISTUTILS SETUP
cfg = defaultdict(list)
wcslib_files = [ # List of wcslib files to compile
'flexed/wcsbth.c',
'flexed/wcspih.c',
'flexed/wcsulex.c',
'flexed/wcsutrn.c',
'cel.c',
'dis.c',
'lin.c',
'log.c',
'prj.c',
'spc.c',
'sph.c',
'spx.c',
'tab.c',
'wcs.c',
'wcserr.c',
'wcsfix.c',
'wcshdr.c',
'wcsprintf.c',
'wcsunits.c',
'wcsutil.c'
]
wcslib_config_paths = [
join(WCSROOT, 'include', 'astropy_wcs', 'wcsconfig.h'),
join(WCSROOT, 'include', 'wcsconfig.h')
]
get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
cfg['include_dirs'].append(join(WCSROOT, "include"))
astropy_wcs_files = [ # List of astropy.wcs files to compile
'distortion.c',
'distortion_wrap.c',
'docstrings.c',
'pipeline.c',
'pyutil.c',
'astropy_wcs.c',
'astropy_wcs_api.c',
'sip.c',
'sip_wrap.c',
'str_list_proxy.c',
'unit_list_proxy.c',
'util.c',
'wcslib_wrap.c',
'wcslib_auxprm_wrap.c',
'wcslib_prjprm_wrap.c',
'wcslib_celprm_wrap.c',
'wcslib_tabprm_wrap.c',
'wcslib_wtbarr_wrap.c'
]
cfg['sources'].extend(join(WCSROOT, 'src', x) for x in astropy_wcs_files)
cfg['sources'] = [str(x) for x in cfg['sources']]
cfg = {str(key): val for key, val in cfg.items()}
# Copy over header files from WCSLIB into the installed version of Astropy
# so that other Python packages can write extensions that link to it. We
# do the copying here then include the data in [options.package_data] in
# the setup.cfg file
wcslib_headers = [
'cel.h',
'lin.h',
'prj.h',
'spc.h',
'spx.h',
'tab.h',
'wcs.h',
'wcserr.h',
'wcsmath.h',
'wcsprintf.h',
]
if not (int(os.environ.get('ASTROPY_USE_SYSTEM_WCSLIB', 0))
or int(os.environ.get('ASTROPY_USE_SYSTEM_ALL', 0))):
for header in wcslib_headers:
source = join('cextern', 'wcslib', 'C', header)
dest = join('astropy', 'wcs', 'include', 'wcslib', header)
if newer_group([source], dest, 'newer'):
shutil.copy(source, dest)
return [Extension('astropy.wcs._wcs', **cfg)]
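# get_extensions() is the hook consumed by astropy's build machinery; a
# minimal, hypothetical stand-alone equivalent would be:
#
#     >>> from setuptools import setup
#     >>> setup(ext_modules=get_extensions())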
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Under the hood, there are 3 separate classes that perform different
# parts of the transformation:
#
# - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
# functionality in `wcslib`_. (This includes TPV and TPD
# polynomial distortion, but not SIP distortion).
#
# - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
# `SIP`_ convention.
#
# - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
# lookup tables.
#
# Additionally, the class `WCS` aggregates all of these transformations
# together in a pipeline:
#
# - Detector to image plane correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
# object)
#
# - `distortion paper`_ table-lookup correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
# STDLIB
import copy
import uuid
import io
import itertools
import os
import re
import textwrap
import warnings
import builtins
# THIRD-PARTY
from packaging.version import Version
import numpy as np
# LOCAL
from astropy import log
from astropy.io import fits
from . import docstrings
from . import _wcs
from astropy import units as u
from astropy.utils.compat import possible_filename
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning
from astropy.utils.decorators import deprecated_renamed_argument
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs',
'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm', 'Auxprm',
'Celprm', 'Prjprm', 'Wtbarr', 'WCSBase', 'validate', 'WcsError',
'SingularMatrixError', 'InconsistentAxisTypesError',
'InvalidTransformError', 'InvalidCoordinateError',
'InvalidPrjParametersError', 'NoSolutionError',
'InvalidSubimageSpecificationError', 'NoConvergence',
'NonseparableSubimageCoordinateSystemError',
'NoWcsKeywordsFoundError', 'InvalidTabularParametersError']
__doctest_skip__ = ['WCS.all_world2pix']
if _wcs is not None:
if Version(_wcs.__version__) < Version("5.8"):
raise ImportError(
"astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
"later on the 5.x series are known to work. The version of wcslib "
"that ships with astropy may be used.")
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build "
"on your platform.")
_WCSSUB_TIME_SUPPORT = Version(_wcs.__version__) >= Version("7.8")
_WCS_TPD_WARN_LT71 = Version(_wcs.__version__) < Version("7.1")
_WCS_TPD_WARN_LT74 = Version(_wcs.__version__) < Version("7.4")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Auxprm = _wcs.Auxprm
Celprm = _wcs.Celprm
Prjprm = _wcs.Prjprm
Tabprm = _wcs.Tabprm
Wtbarr = _wcs.Wtbarr
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
InvalidPrjParametersError = _wcs.InvalidPrjParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(('WCSSUB_', 'WCSHDR_', 'WCSHDO_', 'WCSCOMPARE_', 'PRJ_')):
locals()[key] = val
__all__.append(key)
# Set coordinate extraction callback for WCS -TAB:
def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
arr = hdulist[(extnam, extver)].data[ttype][row - 1]
if arr.ndim != ndim:
if kind == 'c' and ndim == 2:
arr = arr.reshape((arr.size, 1))
else:
raise ValueError("Bad TDIM")
return np.ascontiguousarray(arr, dtype=np.double)
_wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
Wtbarr = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
_WCSSUB_TIME_SUPPORT = False
_WCS_TPD_WARN_LT71 = False
_WCS_TPD_WARN_LT74 = False
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining a SIP keyword. It matches keywords that start
# with A or B, optionally followed by P, followed by an underscore and a
# number in the range 0-19, then an underscore and another number in the
# range 0-19. The keyword may optionally end with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')
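# A quick illustration of what SIP_KW accepts and rejects:
#
#     >>> bool(SIP_KW.match('A_0_2')), bool(SIP_KW.match('BP_10_1'))
#     (True, True)
#     >>> bool(SIP_KW.match('AP_2_19A')), bool(SIP_KW.match('C_1_1'))
#     (True, False)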
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == 'image':
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == 'binary':
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == 'pixel':
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' " +
"and/or 'pixel'")
else:
keysel_flags = -1
return keysel_flags
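# For example (a sketch; the flag values come from the compiled _wcs
# module, so the OR-ed result is build-defined):
#
#     >>> _parse_keysel(['image', 'binary'])  # == WCSHDR_IMGHEAD | WCSHDR_BIMGARR
#     >>> _parse_keysel(None)
#     -1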
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
"""
def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
divergent=None, slow_conv=None, **kwargs):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn("Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
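# Typical handling pattern for this exception (hedged sketch; `w` is an
# existing WCS instance and `ra`, `dec` are coordinate arrays):
#
#     >>> try:
#     ...     x, y = w.all_world2pix(ra, dec, 1, maxiter=20, tolerance=1e-6,
#     ...                            detect_divergence=True, quiet=False)
#     ... except NoConvergence as e:
#     ...     print('Best solution so far:', e.best_solution)
#     ...     print('Divergent points:', e.divergent)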
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
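# Downstream code that considers these fixes routine can silence the
# warning in the standard way (sketch):
#
#     >>> import warnings
#     >>> with warnings.catch_warnings():
#     ...     warnings.simplefilter('ignore', FITSFixedWarning)
#     ...     w = WCS(header)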
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: https://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : `~astropy.io.fits.HDUList`, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of str, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
Both the keyword and its value must be syntactically
valid, otherwise the keyword is not considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
""" # noqa: E501
def __init__(self, header=None, fobj=None, key=' ', minerr=0.0,
relax=True, naxis=None, keysel=None, colsel=None,
fix=True, translate_units='', _do_set=True):
close_fds = []
# these parameters are stored to be used when unpickling a WCS object:
self._init_kwargs = {
'keysel': copy.copy(keysel),
'colsel': copy.copy(colsel),
}
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = (possible_filename(header) and
os.path.exists(header))
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2")
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object")
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError("'fobj' must be either None or an "
"astropy.io.fits.HDUList object.")
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode('ascii')
tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, warnings=False,
hdulist=fobj)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except _wcs.NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(
header, fobj, dist='CPDIS', err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace('END' + ' ' * 77, '')
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
try:
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, hdulist=fobj)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, hdulist=fobj)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if (wcsprm.naxis != 2 and
(det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)):
raise ValueError(
"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
""".format(wcsprm.naxis))
header_naxis = header.get('NAXIS', None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
"The WCS transformation has more axes ({:d}) than the "
"image it is associated with ({:d})".format(
wcsprm.naxis, header_naxis), FITSFixedWarning)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(new_copy, self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2))
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(new_copy, deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo),
deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo),
deepcopy(self.det2im2, memo)))
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [cname_uuid.index(cname) if cname in cname_uuid else None
for cname in copy.wcs.cname]
# Restore the original CNAMEs
copy.wcs.cname = ['' if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple(None if i is None else self.pixel_shape[i] for i in keep)
if self.pixel_bounds:
copy.pixel_bounds = [None if i is None else self.pixel_bounds[i] for i in keep]
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
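# Example use of sub() (a sketch; `w3d` is a hypothetical 3-D WCS): keep
# the first two axes, with pixel_shape/pixel_bounds subset to match as
# implemented above:
#
#     >>> celestial = w3d.sub([1, 2])
#     >>> celestial.naxis
#     2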
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn("Removed redundant SCAMP distortion parameters " +
"because SIP parameters are also present", FITSFixedWarning)
def fix(self, translate_units='', naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (key == 'datfix' and '1858-11-17' in val and
not np.count_nonzero(self.wcs.mjdref)):
continue
warnings.warn(
("'{0}' made the change '{1}'.").
format(key, val),
FITSFixedWarning)
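# For instance, permitting all three potentially unsafe unit translations
# described in the docstring (sketch; `w` is an existing WCS):
#
#     >>> w.fix(translate_units='shd')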
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
header and axes are mutually exclusive, alternative ways
to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : (int, int), optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn(
"Need a valid header in order to calculate footprint\n", AstropyUserWarning)
return None
else:
naxis1 = header.get('NAXIS1', None)
naxis2 = header.get('NAXIS2', None)
if naxis1 is None or naxis2 is None:
raise ValueError(
"Image size could not be determined.")
if center:
corners = np.array([[1, 1],
[1, naxis2],
[naxis1, naxis2],
[naxis1, 1]], dtype=np.float64)
else:
corners = np.array([[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5]], dtype=np.float64)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
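# Footprint sketch for a hypothetical 1024x2048 image, bypassing the
# header via the axes argument:
#
#     >>> footprint = w.calc_footprint(axes=(1024, 2048))
#     >>> footprint.shape   # clockwise from the bottom left corner
#     (4, 2)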
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header['AXISCORR']
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == 'lookup':
del header[distortion]
assert isinstance(fobj, fits.HDUList), (
'An astropy.io.fits.HDUList '
'is required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + '.EXTVER'
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f'.AXIS.{i:d}'
if i == header[dp_axis_key]:
d_data = fobj['D2IMARR', d_extver].data
else:
d_data = (fobj['D2IMARR', d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj['D2IMARR', d_extver].header
d_crpix = (d_header.get('CRPIX1', 0.0), d_header.get('CRPIX2', 0.0))
d_crval = (d_header.get('CRVAL1', 0.0), d_header.get('CRVAL2', 0.0))
d_cdelt = (d_header.get('CDELT1', 1.0), d_header.get('CDELT2', 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix,
d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
for key in set(header):
if key.startswith(dp + '.'):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn(
"The use of ``AXISCORR`` for D2IM correction has been deprecated."
"`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write "
"out files without it.",
AstropyDeprecationWarning)
cpdis = [None, None]
crpix = [0., 0.]
crval = [0., 0.]
cdelt = [1., 1.]
try:
d2im_data = fobj[('D2IMARR', 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[('D2IMARR', 1)].header
naxis = d2im_hdr['NAXIS']
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get('CRPIX' + str(i), 0.0)
crval[i - 1] = d2im_hdr.get('CRVAL' + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get('CDELT' + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`~astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = 'D2IMDIS'
d_kw = 'D2IM'
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f'{dist}{num:d}'] = (
'LOOKUP', 'Detector to image correction type')
hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = (
len(det2im.data.shape), 'Number of independent variables in D2IM function')
for i in range(det2im.data.ndim):
jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th')
hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = (
i + 1, f'Axis number of the {jth} variable in a D2IM function')
image = fits.ImageHDU(det2im.data, name='D2IMARR')
header = image.header
header['CRPIX1'] = (det2im.crpix[0],
'Coordinate system reference pixel')
header['CRPIX2'] = (det2im.crpix[1],
'Coordinate system reference pixel')
header['CRVAL1'] = (det2im.crval[0],
'Coordinate system value at reference pixel')
header['CRVAL2'] = (det2im.crval[1],
'Coordinate system value at reference pixel')
header['CDELT1'] = (det2im.cdelt[0],
'Coordinate increment along axis')
header['CDELT2'] = (det2im.cdelt[1],
'Coordinate increment along axis')
image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER'])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == 'CPDIS':
d_kw = 'DP'
err_kw = 'CPERR'
else:
d_kw = 'DQ'
err_kw = 'CQERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == 'lookup':
if not isinstance(fobj, fits.HDUList):
raise ValueError('an astropy.io.fits.HDUList is '
'required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + '.EXTVER'
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f'.AXIS.{i:d}'
if i == header[dp_axis_key]:
d_data = fobj['WCSDVARR', d_extver].data
else:
d_data = (fobj['WCSDVARR', d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj['WCSDVARR', d_extver].header
d_crpix = (d_header.get('CRPIX1', 0.0),
d_header.get('CRPIX2', 0.0))
d_crval = (d_header.get('CRVAL1', 0.0),
d_header.get('CRVAL2', 0.0))
d_cdelt = (d_header.get('CDELT1', 1.0),
d_header.get('CDELT2', 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + '.'):
del header[key]
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist='CPDIS'):
"""
Write out `distortion paper`_ keywords to the given
`~astropy.io.fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == 'CPDIS':
d_kw = 'DP'
else:
d_kw = 'DQ'
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f'{dist}{num:d}'] = (
'LOOKUP', 'Prior distortion function type')
hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = (
len(cpdis.data.shape), f'Number of independent variables in {dist} function')
for i in range(cpdis.data.ndim):
jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th')
hdulist[0].header[f'{d_kw}{num:d}.AXIS.{i + 1:d}'] = (
i + 1,
f'Axis number of the {jth} variable in a {dist} function')
image = fits.ImageHDU(cpdis.data, name='WCSDVARR')
header = image.header
header['CRPIX1'] = (cpdis.crpix[0], 'Coordinate system reference pixel')
header['CRPIX2'] = (cpdis.crpix[1], 'Coordinate system reference pixel')
header['CRVAL1'] = (cpdis.crval[0], 'Coordinate system value at reference pixel')
header['CRVAL2'] = (cpdis.crval[1], 'Coordinate system value at reference pixel')
header['CDELT1'] = (cpdis.cdelt[0], 'Coordinate increment along axis')
header['CDELT2'] = (cpdis.cdelt[1], 'Coordinate increment along axis')
image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER'])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in {m.group() for m in map(SIP_KW.match, list(header))
if m is not None}:
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if "A_ORDER" in header and header['A_ORDER'] > 1:
if "B_ORDER" not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion")
m = int(header["A_ORDER"])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"A_{i}_{j}"
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header["B_ORDER"])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"B_{i}_{j}"
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header['A_ORDER']
del header['B_ORDER']
ctype = [header[f'CTYPE{nax}{wcskey}'] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith('-SIP') for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
""" # noqa: E501
log.info(message)
elif "B_ORDER" in header and header['B_ORDER'] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER " +
"keyword for SIP distortion")
else:
a = None
b = None
if "AP_ORDER" in header and header['AP_ORDER'] > 1:
if "BP_ORDER" not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion")
m = int(header["AP_ORDER"])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"AP_{i}_{j}"
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header["BP_ORDER"])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"BP_{i}_{j}"
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header['AP_ORDER']
del header['BP_ORDER']
elif "BP_ORDER" in header and header['BP_ORDER'] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion")
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
raise ValueError(
"Header has SIP keywords without CRPIX keywords")
crpix1 = header.get(f"CRPIX1{wcskey}")
crpix2 = header.get(f"CRPIX2{wcskey}")
return Sip(a, b, ap, bp, (crpix1, crpix2))
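# The Sip object returned above can also be built directly from
# coefficient arrays (hedged sketch; a toy 2nd-order, forward-only
# distortion with hypothetical values):
#
#     >>> import numpy as np
#     >>> a = np.zeros((3, 3)); a[0, 2] = 1e-6
#     >>> b = np.zeros((3, 3)); b[2, 0] = -1e-6
#     >>> sip = Sip(a, b, None, None, (512.0, 512.0))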
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = 'sky to detector' if name[-1] == 'P' else 'detector to sky'
comment = ('SIP polynomial order, axis {:d}, {:s}'
.format(ord(name[0]) - ord('A'), trdir))
keywords[f'{name}_ORDER'] = size - 1, comment
comment = 'SIP distortion coefficient'
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[
f'{name}_{i:d}_{j:d}'] = a[i, j], comment
write_array('A', self.sip.a)
write_array('B', self.sip.b)
write_array('AP', self.sip.ap)
write_array('BP', self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial " +
"axes, therefore (ra, dec) data can not be used as input")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be " +
"used as input")
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
if any([x.size == 0 for x in axes]):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other")
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == 'output':
output = self._normalize_sky(output)
return (output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape))
return [output[:, i].reshape(axes[0].shape)
for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
"of shape (N, {})".format(self.naxis))
if 0 in xy.shape:
return xy
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == 'output':
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
"(coords[N][{}], origin)".format(self.naxis))
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be " +
"a 1-D array for each axis, followed by an origin.")
return _return_list_of_arrays(axes, origin)
raise TypeError(
"WCS projection has {0} dimensions, so expected 2 (an Nx{0} array "
"and the origin argument) or {1} arguments (the position in each "
"dimension, and the origin argument). Instead, {2} arguments were "
"given.".format(
self.naxis, self.naxis + 1, len(args)))
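# The two calling conventions this helper supports, sketched for a 2-D WCS
# `w` (the final positional argument is the 0- or 1-based origin):
#
#     >>> sky = w.wcs_pix2world([[100., 200.]], 1)        # one (N, 2) array
#     >>> ra, dec = w.wcs_pix2world([100.], [200.], 1)    # one array per axis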
def all_pix2world(self, *args, **kwargs):
return self._array_converter(
self._all_pix2world, 'output', *args, **kwargs)
all_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('sky coordinates, in degrees', 8))
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)['world'],
'output', *args, **kwargs)
wcs_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('world coordinates, in degrees', 8))
def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive,
detect_divergence, quiet):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
# (see equations 2 and 3 in the SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
# a different phase than the user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
# (prime) for the pixel coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
# `pix2foc` is the identity transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking if iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge for which we
# want to stop iterations). In my tests, the adaptive version
# of the algorithm is about 50% slower than the non-adaptive
# version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` can be an `NxM` array
# where `N` is the number of points to be converted
# simultaneously to image coordinates and `M` is the number
# of coordinate axes in WCS (see the shape example below).
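#
# For example (values as in the docstring examples below), for
# a two-axis celestial WCS and three points:
#
#     world = np.array([[5.52, -72.05],
#                       [5.53, -72.05],
#                       [5.54, -72.05]])   # shape (N, M) = (3, 2)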
#
#
# ### IMPORTANT NOTE: ###
#
# If, in future releases of `~astropy.wcs`, `pix2foc`
# no longer applies all the required distortion
# corrections, then in the code below calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) squared of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = (dnnew < dnprev[ind])
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world), axis=1)))
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
"After {:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
return pix
@deprecated_renamed_argument('accuracy', 'tolerance', '4.3')
def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
detect_divergence=True, quiet=False, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs:
self._all_world2pix(
*args, tolerance=tolerance, maxiter=maxiter,
adaptive=adaptive, detect_divergence=detect_divergence,
quiet=quiet),
'input', *args, **kwargs
)
all_world2pix.__doc__ = """
all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
tolerance : float, optional (default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels of the current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within the maximum number of
iterations set by the ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points, regardless of their
individual convergence, until the required accuracy has
been reached for *all* input points. In some cases
*almost all* points may have reached the required accuracy
while only a few input data points remain for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the returned results (in addition to a
performance penalty). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about a 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Using the method of fixed-point iterations, we
iterate starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'],
'input', *args, **kwargs)
wcs_world2pix.__doc__ = """
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
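# A minimal usage sketch (reusing the `w`, `ra`, and `dec` from the
# `all_world2pix` docstring examples above). Since only the core WCS
# transformation is applied, the result may differ slightly from
# `all_world2pix` for images with SIP or distortion-paper corrections:
#
#     x, y = w.wcs_world2pix(ra, dec, 1)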
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = """
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = """
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def proj_plane_pixel_scales(self):
"""
Calculate pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This method is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this method ignores distortions arising due to
the non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
scale : list of `~astropy.units.Quantity`
A vector of projection plane increments corresponding to each
pixel side (axis).
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
""" # noqa: E501
from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import
values = proj_plane_pixel_scales(self)
units = [u.Unit(x) for x in self.wcs.cunit]
return [value * unit for (value, unit) in zip(values, units)] # Can have different units
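# A minimal usage sketch (assuming a 2-D celestial WCS `w`); the
# returned values are `~astropy.units.Quantity` objects:
#
#     scale_x, scale_y = w.proj_plane_pixel_scales()
#     print(scale_x.to(u.arcsec), scale_y.to(u.arcsec))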
def proj_plane_pixel_area(self):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
the non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
area : `~astropy.units.Quantity`
Area (in the projection plane) of the pixel at ``CRPIX`` location.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
""" # noqa: E501
from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import
value = proj_plane_pixel_area(self)
unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only
return value * unit
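# A minimal usage sketch (assuming a 2-D celestial WCS `w`); per the
# Notes above, the square root of the area gives the side of an
# equivalent square pixel:
#
#     area = w.proj_plane_pixel_area()   # e.g. in deg**2
#     eq_scale = area ** 0.5             # deg per pixel side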
def to_fits(self, relax=False, key=None):
"""
Generate an `~astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14 # Defined by C-ext # noqa: F821
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext # noqa: F821
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
# Check if we can handle TPD distortion correctly
if _WCS_TPD_WARN_LT71:
for kw, val in header.items():
if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD':
warnings.warn(
f"WCS contains a TPD distortion model in {kw}. WCSLIB "
f"{_wcs.__version__} is writing this in a format incompatible with "
f"current versions - please update to 7.4 or use the bundled WCSLIB.",
AstropyWarning)
elif _WCS_TPD_WARN_LT74:
for kw, val in header.items():
if kw[:5] in ('CPDIS', 'CQDIS') and val == 'TPD':
warnings.warn(
f"WCS contains a TPD distortion model in {kw}, which requires WCSLIB "
f"7.4 or later to store in a FITS header (having {_wcs.__version__}).",
AstropyWarning)
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None:
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded: {} "
"Use the ``relax`` kwarg to control this.".format(
', '.join(missing_keys)),
AstropyWarning)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
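# A minimal usage sketch (hypothetical values; the flags are the
# module-level WCSHDO_* constants referenced in the code above). The
# bit-field form of ``relax`` selects exactly which extensions to
# write, e.g. the "safe" keywords plus SIP:
#
#     hdr = w.to_header(relax=WCSHDO_safe | WCSHDO_SIP)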
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if current WCS is already distortion-corrected (eg, drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis+1):
# strip() must be called here to cover the case of alt key= " "
kw = f'CTYPE{i}{self.wcs.alt}'.strip()
if kw in header:
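# NOTE: str.strip("-SIP") removes any leading/trailing
# characters from the set {'-', 'S', 'I', 'P'} rather than the
# literal suffix; this works for standard CTYPE values such as
# 'RA---TAN-SIP'.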
if add_sip:
val = header[kw].strip("-SIP") + "-SIP"
else:
val = header[kw].strip("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(self, filename='footprint.reg', color='green',
width=2, coordsys=None):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = ('# Region file format: DS9 version 4.0 \n'
'# global color=green font="helvetica 12 bold '
'select=1 highlite=1 edit=1 move=1 delete=1 '
'include=1 fixed=0 source\n')
coordsys = coordsys or self.wcs.radesys
if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
'AMPLIFIER', 'DETECTOR'):
raise ValueError("Coordinate system '{}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
.format(coordsys))
with open(filename, mode='w') as f:
f.write(comments)
f.write(f'{coordsys}\n')
f.write('polygon(')
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=',')
f.write(f') # color={color}, width={width:d} \n')
def _get_naxis(self, header=None):
_naxis = []
if (header is not None and
not isinstance(header, (str, bytes))):
for naxis in itertools.count(1):
try:
_naxis.append(header[f'NAXIS{naxis}'])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
'''
Return a short description. Simply porting the behavior from
the `printwcs()` method.
'''
description = ["WCS Keywords\n",
f"Number of WCS axes: {self.naxis!r}"]
sfmt = ' : ' + "".join(["{"+f"{i}"+"!r} " for i in range(self.naxis)])
keywords = ['CTYPE', 'CRVAL', 'CRPIX']
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword+sfmt.format(*value))
if hasattr(self.wcs, 'pc'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += ''.join(['PC', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = 'CDELT' + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, 'cd'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += "".join(['CD', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append(f"NAXIS : {' '.join(map(str, self._naxis))}")
return '\n'.join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dict
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError(
"This WCS object does not have a wcsprm object.")
coordinate_type_map = {
0: None,
1: 'stokes',
2: 'celestial',
3: 'spectral'}
scale_map = {
0: 'linear',
1: 'quantized',
2: 'non-linear celestial',
3: 'non-linear spectral',
4: 'logarithmic',
5: 'tabular'}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult['coordinate_type'] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult['scale'] = scale_map[scale]
group = (axis_type // 10) % 10
subresult['group'] = group
number = axis_type % 10
subresult['number'] = number
result.append(subresult)
return result
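# For example, a raw ``axis_type`` value of 2200 decodes digit by
# digit as coordinate_type 2 ('celestial'), scale 2 ('non-linear
# celestial'), group 0, and number 0 (a longitude coordinate).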
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
dct = self.__dict__.copy()
dct['_alt_wcskey'] = self.wcs.alt
return (__WCS_unpickle__,
(self.__class__, dct, buffer.getvalue(),))
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS with naxis to be chopped to naxis-1
dropax : int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with one axis fewer
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i+1 for i in inds])
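# For example, with wcs.wcs.ctype == ['RA', 'DEC', 'VLSR']:
#
#     wcs_celestial = wcs.dropaxis(2)   # keeps 'RA', 'DEC'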
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to have its axes swapped
ax0 : int
ax1 : int
The indices of the WCS to be swapped, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with the same number of axes,
but two swapped
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i+1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES, WCSSUB_TIME]) # Defined by C-ext # noqa: F821 E501
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
Negative values of the slice ``step`` (which would reverse
an axis) are not presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new resampled WCS axis
"""
if hasattr(view, '__len__') and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, '__len__'): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not "
"implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = ((crpix - iview.start - 1.)/iview.step
+ 0.5 + 1./iview.step/2.)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if 'indices must be integers' not in str(exc):
raise
warnings.warn("NAXIS{} attribute is not updated because at "
"least one index ('{}') is no integer."
"".format(wcs_index, iview), AstropyUserWarning)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp,
sip_crpix)
return wcs_new
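# A minimal usage sketch (assuming a 2-D WCS `w` with NAXIS set):
#
#     w_cut = w[100:200, 50:150]                  # numpy order (row, col)
#     w_binned = w.slice((slice(None, None, 2),
#                         slice(None, None, 2)))  # 2x downsampling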
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
"""
World names for each coordinate axis
Returns
-------
list of str
A list of names along each axis.
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split('-')[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included
"""
return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext # noqa: F821
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def spectral(self):
"""
A copy of the current WCS with only the spectral axes included
"""
return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext # noqa: F821
@property
def is_spectral(self):
return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
try:
return self.wcs.spec >= 0
except InconsistentAxisTypesError:
return False
@property
def temporal(self):
"""
A copy of the current WCS with only the time axes included
"""
if not _WCSSUB_TIME_SUPPORT:
raise NotImplementedError(
"Support for 'temporal' axis requires WCSLIB version 7.8 or "
f"greater but linked WCSLIB version is {_wcs.__version__}"
)
return self.sub([WCSSUB_TIME]) # Defined by C-ext # noqa: F821
@property
def is_temporal(self):
return self.has_temporal and self.naxis == 1
@property
def has_temporal(self):
return any(t // 1000 == 4 for t in self.wcs.axis_types)
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
return (self.sip is not None or
self.cpdis1 is not None or self.cpdis2 is not None or
self.det2im1 is not None or self.det2im2 is not None)
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', 'cdelt will be ignored since cd is present', RuntimeWarning)
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.dot(cdelt, pc)
return pccd
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
The coordinate to check if it is within the wcs coordinate.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
naxis = dct.pop('naxis', None)
if naxis:
hdulist[0].header['naxis'] = naxis
naxes = dct.pop('_naxis', [])
for k, na in enumerate(naxes):
hdulist[0].header[f'naxis{k + 1:d}'] = na
kwargs = dct.pop('_init_kwargs', {})
self.__dict__.update(dct)
wcskey = dct.pop('_alt_wcskey', ' ')
WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs)
self.pixel_bounds = dct.get('_pixel_bounds', None)
return self
def find_all_wcs(header, relax=True, keysel=None, fix=True,
translate_units='',
_do_set=True):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or `~astropy.io.fits.Header` object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
keysel : sequence of str, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS`
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError(
"header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str or file-like or `~astropy.io.fits.HDUList`
The FITS file to validate.
Returns
-------
results : list subclass instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [f" WCS key '{self._key or ' '}':"]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = ' - '
else:
initial_indent = ' '
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=' '))
else:
result.append(" No issues.")
return '\n'.join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = f' ({self._hdu_name})'
else:
hdu_name = ''
result = [f'HDU {self._hdu_index}{hdu_name}:']
for wcs in self:
result.append(repr(wcs))
return '\n'.join(result)
return ''
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return '\n\n'.join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject,
fix=False, _do_set=False)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter(
"always", FITSFixedWarning, append=True)
try:
WCS(hdu.header,
key=wcs.wcs.alt or ' ',
relax=_wcs.WCSHDR_reject,
fix=True, _do_set=False)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import os
import copy
import enum
import operator
import threading
from datetime import datetime, date, timedelta
from time import strftime
from warnings import warn
import numpy as np
import erfa
from astropy import units as u, constants as const
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.compat.misc import override__dir__
from astropy.utils.data_info import MixinInfo, data_info_factory
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from .utils import day_frac
from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS,
TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime)
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # noqa
from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS
from astropy.extern import _strptime
__all__ = ['TimeBase', 'Time', 'TimeDelta', 'TimeInfo', 'TimeInfoBase', 'update_leap_seconds',
'TIME_SCALES', 'STANDARD_TIME_SCALES', 'TIME_DELTA_SCALES',
'ScaleValueError', 'OperandTypeError', 'TimeDeltaMissingUnitWarning']
STANDARD_TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
LOCAL_SCALES = ('local',)
TIME_TYPES = {scale: scales for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales}
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'),
('tai', 'tcg'): ('tt',),
('tai', 'ut1'): ('utc',),
('tai', 'tdb'): ('tt',),
('tcb', 'tcg'): ('tdb', 'tt'),
('tcb', 'tt'): ('tdb',),
('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'),
('tcb', 'utc'): ('tdb', 'tt', 'tai'),
('tcg', 'tdb'): ('tt',),
('tcg', 'ut1'): ('tt', 'tai', 'utc'),
('tcg', 'utc'): ('tt', 'tai'),
('tdb', 'ut1'): ('tt', 'tai', 'utc'),
('tdb', 'utc'): ('tt', 'tai'),
('tt', 'ut1'): ('tai', 'utc'),
('tt', 'utc'): ('tai',),
}
GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg')
BARYCENTRIC_SCALES = ('tcb', 'tdb')
ROTATIONAL_SCALES = ('ut1',)
TIME_DELTA_TYPES = {scale: scales
for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES,
ROTATIONAL_SCALES, LOCAL_SCALES) for scale in scales}
TIME_DELTA_SCALES = GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {('tt', 'tai'): None,
('tai', 'tt'): None,
('tcg', 'tt'): -erfa.ELG,
('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcg', 'tai'): -erfa.ELG,
('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcb', 'tdb'): -erfa.ELB,
('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)}
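# For example, following the relation above, a TT interval `dt_tt`
# maps to a TCG interval as
#     dt_tcg = dt_tt + dt_tt * SCALE_OFFSETS[('tt', 'tcg')]
# i.e. dt_tcg = dt_tt / (1 - L_G), consistent with
# d(TCG)/d(TT) = 1 + L_G/(1-L_G) derived above.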
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
'mean': {
'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')},
'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')},
'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',), 'include_tio': False}
},
'apparent': {
'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')},
'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')},
'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)},
'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',), 'include_tio': False}
}}
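# For example, looking up the apparent sidereal time model 'IAU2006A'
# yields the ERFA function and the time scales its arguments require:
#     model = SIDEREAL_TIME_MODELS['apparent']['IAU2006A']
#     gst_function, scales = model['function'], model['scales']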
class _LeapSecondsCheck(enum.Enum):
NOT_STARTED = 0 # No thread has reached the check
RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held)
DONE = 2 # update_leap_seconds has completed
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED
_LEAP_SECONDS_LOCK = threading.RLock()
class TimeInfoBase(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
This base class is common between TimeInfo and TimeDeltaInfo.
"""
attr_names = MixinInfo.attr_names | {'serialize_method'}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized in different ways.
_represent_as_dict_extra_attrs = ('format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location',
'_delta_ut1_utc', '_delta_tdb_tt')
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = 'value'
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == 'formatted_value':
out = ('value',)
elif method == 'jd1_jd2':
out = ('jd1', 'jd2')
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {'fits': 'jd1_jd2',
'ecsv': 'formatted_value',
'hdf5': 'jd1_jd2',
'yaml': 'jd1_jd2',
'parquet': 'jd1_jd2',
None: 'jd1_jd2'}
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
Returns
-------
arrays : list of ndarray
"""
parent = self._parent
jd_approx = parent.jd
jd_remainder = (parent - parent.__class__(jd_approx, format='jd')).jd
return [jd_approx, jd_remainder]
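    # Illustrative use of the two returned arrays (a sketch; ``t`` stands for
    # a Time column): sorting on the approximate JD with the remainder as
    # tie-breaker recovers the full two-double precision:
    #
    #     approx, remainder = t.info.get_sortable_arrays()
    #     order = np.lexsort((remainder, approx))  # last key is primary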
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats]))
    # When Time has mean, std, min, max methods:
    # funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats]
def _construct_from_dict(self, map):
if 'jd1' in map and 'jd2' in map:
# Initialize as JD but revert to desired format and out_subfmt (if needed)
format = map.pop('format')
out_subfmt = map.pop('out_subfmt', None)
map['format'] = 'jd'
map['val'] = map.pop('jd1')
map['val2'] = map.pop('jd2')
out = self._parent_cls(**map)
out.format = format
if out_subfmt is not None:
out.out_subfmt = out_subfmt
else:
map['val'] = map.pop('value')
out = self._parent_cls(**map)
return out
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'description'))
attrs.pop('dtype') # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError('input columns have inconsistent locations')
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop('shape')
        jd2000 = 2451544.5  # JD for 2000-01-01; an arbitrary value that works with ERFA
jd1 = np.full(shape, jd2000, dtype='f8')
jd2 = np.zeros(shape, dtype='f8')
tm_attrs = {attr: getattr(col0, attr)
for attr in ('scale', 'location',
'precision', 'in_subfmt', 'out_subfmt')}
out = self._parent_cls(jd1, jd2, format='jd', **tm_attrs)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
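    # Illustrative use of ``new_like`` (a sketch of what table operations such
    # as ``vstack`` do; the column name here is hypothetical):
    #
    #     t = Time(['2001-01-02 12:34:56'])
    #     new = t.info.new_like([t, t], length=5, name='obstime')
    #     new[0] = t[0]   # elements can now be set in place via __setitem__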
class TimeInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (self._serialize_context == 'ecsv'
and map['format'] == 'datetime64'
and 'value' in map):
map['value'] = map['value'].astype('U')
# The datetime format is serialized as ISO with no loss of precision.
if map['format'] == 'datetime' and 'value' in map:
map['value'] = np.vectorize(lambda x: x.isoformat())(map['value'])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (map['format'] == 'datetime64'
and 'value' in map
and map['value'].dtype.kind == 'U'):
map['value'] = map['value'].astype('datetime64')
# Convert back to datetime objects for datetime format.
if map['format'] == 'datetime' and 'value' in map:
from datetime import datetime
map['value'] = np.vectorize(datetime.fromisoformat)(map['value'])
delta_ut1_utc = map.pop('_delta_ut1_utc', None)
delta_tdb_tt = map.pop('_delta_tdb_tt', None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
class TimeDeltaInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_extra_attrs = ('format', 'scale')
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new TimeDelta instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
        This is intended for creating an empty TimeDelta instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'description'))
attrs.pop('dtype') # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop('shape')
jd1 = np.zeros(shape, dtype='f8')
jd2 = np.zeros(shape, dtype='f8')
out = self._parent_cls(jd1, jd2, format='jd', scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeBase(ShapedLikeNDArray):
"""Base time class from which Time and TimeDelta inherit."""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __getnewargs__(self):
return (self._time,)
def _init_from_vals(self, val, val2, format, scale, copy,
precision=None, in_subfmt=None, out_subfmt=None):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = '*'
if out_subfmt is None:
out_subfmt = '*'
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError('Input val and val2 have inconsistent shape; '
'they cannot be broadcast together.')
if scale is not None:
if not (isinstance(scale, str)
and scale.lower() in self.SCALES):
raise ScaleValueError("Scale {!r} is not in the allowed scales "
"{}".format(scale,
sorted(self.SCALES)))
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(val, val2, format, scale,
precision, in_subfmt, out_subfmt)
self._format = self._time.name
# Hack from #9969 to allow passing the location value that has been
# collected by the TimeAstropyTime format class up to the Time level.
# TODO: find a nicer way.
if hasattr(self._time, '_location'):
self.location = self._time._location
del self._time._location
        # If any inputs were masked then mask jd2 accordingly. From the above
        # routine ``mask`` must be either the Python bool False or a bool ndarray
# with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01
self._time.jd2[mask] = np.nan
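    # Illustrative behaviour for masked input (a sketch, assuming a numpy
    # masked array is passed in):
    #
    #     v = np.ma.array([51544.0, 51545.0], mask=[False, True])
    #     t = Time(v, format='mjd')
    #     t.jd2   # second element comes back masked; stored internally as NaN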
def _get_time_fmt(self, val, val2, format, scale,
precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if (format is None
and (val.dtype.kind in ('S', 'U', 'O', 'M') or val.dtype.names)):
# Input is a string, object, datetime, or a table-like ndarray
# (structured array, recarray). These input types can be
# uniquely identified by the format classes.
formats = [(name, cls) for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)]
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(('astropy_time', TimeAstropyTime))
elif not (isinstance(format, str)
and format.lower() in self.FORMATS):
if format is None:
raise ValueError("No time format was given, and the input is "
"not unique")
else:
raise ValueError("Format {!r} is not one of the allowed "
"formats {}".format(format,
sorted(self.FORMATS)))
else:
formats = [(format, self.FORMATS[format])]
assert formats
problems = {}
for name, cls in formats:
try:
return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError) as err:
# If ``format`` specified then there is only one possibility, so raise
# immediately and include the upstream exception message to make it
# easier for user to see what is wrong.
if len(formats) == 1:
raise ValueError(
f'Input values did not match the format class {format}:'
+ os.linesep
+ f'{err.__class__.__name__}: {err}'
) from err
else:
problems[name] = err
else:
raise ValueError(f'Input values did not match any of the formats '
f'where the format keyword is optional: '
f'{problems}') from problems[formats[0][0]]
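    # Format guessing in action (illustrative sketch):
    #
    #     Time('2010-01-01 00:00:00')   # matched by the 'iso' format class
    #     Time('J2000.0')               # matched by 'jyear_str'
    #     Time(2451545.0)               # raises ValueError: numeric input
    #                                   # requires an explicit format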
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format"""
if format not in self.FORMATS:
raise ValueError(f'format must be one of {list(self.FORMATS)}')
format_cls = self.FORMATS[format]
# Get the new TimeFormat object to contain time in new format. Possibly
# coerce in/out_subfmt to '*' (default) if existing subfmt values are
# not valid in the new format.
self._time = format_cls(
self._time.jd1, self._time.jd2,
self._time._scale, self.precision,
in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt),
out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt),
from_jd=True)
self._format = format
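    # Illustrative round-trip through the format setter (a sketch):
    #
    #     t = Time('2010-01-01 00:00:00')
    #     t.format = 'mjd'
    #     t.value   # 55197.0; jd1/jd2 are unchanged, only the view differs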
def __repr__(self):
return ("<{} object: scale='{}' format='{}' value={}>"
.format(self.__class__.__name__, self.scale, self.format,
getattr(self, self.format)))
def __str__(self):
return str(getattr(self, self.format))
def __hash__(self):
try:
loc = getattr(self, 'location', None)
if loc is not None:
loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m)
return hash((self.jd1, self.jd2, self.scale, loc))
except TypeError:
if self.ndim != 0:
reason = '(must be scalar)'
elif self.masked:
reason = '(value is masked)'
else:
raise
raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}")
@property
def scale(self):
"""Time scale"""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {!r} is not in the allowed scales {}"
.format(scale, sorted(self.SCALES)))
if scale == 'utc' or self.scale == 'utc':
# If doing a transform involving UTC then check that the leap
# seconds table is up to date.
_check_leapsec()
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = '_get_delta_{}_{}'.format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
jd1, jd2 = day_frac(jd1, jd2)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision,
self.in_subfmt, self.out_subfmt,
from_jd=True)
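    # Worked example of the chain construction above: converting 'utc' to
    # 'tcb' sorts to ('tcb', 'utc'), picks up the MULTI_HOPS entry
    # ('tdb', 'tt', 'tai'), and, since sorting reversed the request, the
    # chain is reversed, giving utc -> tai -> tt -> tdb -> tcb, with each hop
    # handled by the matching erfa routine (erfa.utctai, erfa.taitt, ...).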
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
self._time.in_subfmt = val
del self.cache
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
# Setting the out_subfmt property here does validation of ``val``
self._time.out_subfmt = val
del self.cache
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in ((self._time, 'jd1'),
(self._time, 'jd2'),
(self, '_delta_ut1_utc'),
(self, '_delta_tdb_tt'),
(self, 'location')):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
if self._time.jd1.shape:
if isinstance(value, np.ndarray):
return value
else:
raise TypeError(
f"JD is an array ({self._time.jd1!r}) but value "
f"is not ({value!r})")
else:
# zero-dimensional array, is it safe to unbox?
if (isinstance(value, np.ndarray)
and not value.shape
and not np.ma.is_masked(value)):
if value.dtype.kind == 'M':
# existing test doesn't want datetime64 converted
return value[()]
elif value.dtype.fields:
# Unpack but keep field names; .item() doesn't
# Still don't get python types in the fields
return value[()]
else:
return value.item()
else:
return value
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
def to_value(self, format, subfmt='*'):
"""Get time values expressed in specified output format.
This method allows representing the ``Time`` object in the desired
output ``format`` and optional sub-format ``subfmt``. Available
built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each
        format can have its own sub-formats.
For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can
be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with
the enhancement depending on platform), and 'decimal'
:class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the
number of digits is also chosen such that time values are represented
accurately.
For built-in date-like string formats, one of 'date_hms', 'date_hm', or
'date' (or 'longdate_hms', etc., for 5-digit years in
`~astropy.time.TimeFITS`). For sub-formats including seconds, the
number of digits used for the fractional seconds is as set by
`~astropy.time.Time.precision`.
Parameters
----------
format : str
The format in which one wants the time values. Default: the current
format.
subfmt : str or None, optional
Value or wildcard pattern to select the sub-format in which the
values should be given. The default of '*' picks the first
available for a given format, i.e., 'float' or 'date_hms'.
If `None`, use the instance's ``out_subfmt``.
"""
# TODO: add a precision argument (but ensure it is keyword argument
# only, to make life easier for TimeDelta.to_value()).
if format not in self.FORMATS:
raise ValueError(f'format must be one of {list(self.FORMATS)}')
cache = self.cache['format']
# Try to keep cache behaviour like it was in astropy < 4.0.
key = format if subfmt is None else (format, subfmt)
if key not in cache:
if format == self.format:
tm = self
else:
tm = self.replicate(format=format)
            # Some TimeFormat subclasses may not be able to handle being passed
            # an out_subfmt. This includes some core classes like
# TimeBesselianEpochString that do not have any allowed subfmts. But
# those do deal with `self.out_subfmt` internally, so if subfmt is
# the same, we do not pass it on.
kwargs = {}
if subfmt is not None and subfmt != tm.out_subfmt:
kwargs['out_subfmt'] = subfmt
try:
value = tm._time.to_value(parent=tm, **kwargs)
except TypeError as exc:
# Try validating subfmt, e.g. for formats like 'jyear_str' that
# do not implement out_subfmt in to_value() (because there are
# no allowed subformats). If subfmt is not valid this gives the
# same exception as would have occurred if the call to
# `to_value()` had succeeded.
tm._time._select_subfmts(subfmt)
# Subfmt was valid, so fall back to the original exception to see
# if it was lack of support for out_subfmt as a call arg.
if "unexpected keyword argument 'out_subfmt'" in str(exc):
raise ValueError(
f"to_value() method for format {format!r} does not "
f"support passing a 'subfmt' argument") from None
else:
# Some unforeseen exception so raise.
raise
value = tm._shaped_like_input(value)
cache[key] = value
return cache[key]
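    # Illustrative calls (a sketch; the exact string output depends on the
    # subfmt digit selection):
    #
    #     t = Time('2010-01-01 00:00:00')
    #     t.to_value('mjd')                  # 55197.0 (float subfmt)
    #     t.to_value('mjd', subfmt='str')    # e.g. '55197.0'
    #     t.to_value('iso', subfmt='date')   # '2010-01-01'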
@property
def value(self):
"""Time value(s) in current format"""
return self.to_value(self.format, None)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
            from that of the column, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError('obj arg must be an integer')
if axis != 0:
raise ValueError('axis must be 0')
if not self.shape:
raise TypeError('cannot insert into scalar {} object'
.format(self.__class__.__name__))
if abs(idx0) > len(self):
raise IndexError('index {} is out of bounds for axis 0 with size {}'
.format(idx0, len(self)))
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, self.__class__):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0:idx0 + n_values] = values
out._time.jd1[idx0 + n_values:] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values:] = self._time.jd2[idx0:]
return out
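    # Illustrative use of ``insert`` (a sketch):
    #
    #     t = Time(['1999-01-01', '2000-01-01'])
    #     t.insert(1, '1999-07-01')   # 3-element Time; the value is coerced
    #                                 # via the same machinery as __setitem__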
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError('{} object is read-only. Make a '
'copy() or set "writeable" attribute to True.'
.format(self.__class__.__name__))
else:
raise ValueError('scalar {} object is read-only.'
.format(self.__class__.__name__))
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ('_delta_tdb_tt', '_delta_ut1_utc'):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def isclose(self, other, atol=None):
"""Returns a boolean or boolean array where two Time objects are
element-wise equal within a time tolerance.
This evaluates the expression below::
abs(self - other) <= atol
Parameters
----------
other : `~astropy.time.Time`
Time object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is two bits in the 128-bit JD time representation,
equivalent to about 40 picosecs.
"""
if atol is None:
# Note: use 2 bits instead of 1 bit based on experience in precision
# tests, since taking the difference with a UTC time means one has
# to do a scale change.
atol = 2 * np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError("'atol' argument must be a Quantity or TimeDelta instance, got "
f'{atol.__class__.__name__} instead')
try:
# Separate these out so user sees where the problem is
dt = self - other
dt = abs(dt)
out = dt <= atol
except Exception as err:
raise TypeError("'other' argument must support subtraction with Time "
f"and return a value that supports comparison with "
f"{atol.__class__.__name__}: {err}")
return out
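    # Illustrative use of ``isclose`` (a sketch; ``u`` is astropy.units as
    # imported in this module):
    #
    #     t1 = Time('2020-01-01 00:00:00')
    #     t2 = t1 + 0.5 * u.ms
    #     t1.isclose(t2, atol=1 * u.ms)   # True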
def copy(self, format=None):
"""
        Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply('copy', format=format)
def replicate(self, format=None, copy=False, cls=None):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply('copy' if copy else 'replicate', format=format, cls=cls)
def _apply(self, method, *args, format=None, cls=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
            broadcast : ``_apply(np.broadcast_to, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == 'replicate':
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(cls or self.__class__)
tm._time = TimeJD(jd1, jd2, self.scale, precision=0,
in_subfmt='*', out_subfmt='*', from_jd=True)
# Optional ndarray attributes.
for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location'):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only an array scalar and the method would return a view,
# since in that case nothing would change).
if getattr(val, 'shape', ()):
val = apply_method(val)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined and the
# time object is not a scalar (issue #10688).
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError(f'format must be one of {list(tm.FORMATS)}')
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(
tm._time.jd1, tm._time.jd2,
tm._time._scale,
precision=self.precision,
in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
from_jd=True)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
        indexed sequentially. If ``keepdims`` is ``True``, the net result is
        the same as constructing an index grid with ``np.ogrid`` and then
        replacing the ``axis`` item with ``indices`` with its shape expanded
        at ``axis``. If ``keepdims`` is ``False``, the result is the same but
        with the ``axis`` dimension removed from all list entries.
        If ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
index = [indices
if i == axis
else np.arange(s).reshape(
(1,) * (i if keepdims or i < axis else i - 1)
+ (s,)
+ (1,) * (ndim - i - (1 if keepdims or i > axis else 2))
)
for i, s in enumerate(self.shape)]
return tuple(index)
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# First get the minimum at normal precision.
jd1, jd2 = self.jd1, self.jd2
approx = np.min(jd1 + jd2, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (jd1 - approx) + jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = np.max(jd1 + jd2, axis, keepdims=True)
dt = (jd1 - approx) + jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = jd1 + jd2
remainder = (jd1 - approx) + jd2
if axis is None:
return np.lexsort((remainder.ravel(), approx.ravel()))
else:
return np.lexsort(keys=(remainder, approx), axis=axis)
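    # The two-key lexsort above correctly orders times that are
    # indistinguishable in a plain float64 ``jd1 + jd2`` sum. Illustrative
    # sketch (0.1 ns offsets are far below the resolution of the sum):
    #
    #     t = (Time(2455197.5, format='jd')
    #          + TimeDelta([0., 1e-10, -1e-10], format='sec'))
    #     t.argsort()   # array([2, 0, 1]); remainders break the float64 tie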
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return (self.max(axis, keepdims=keepdims)
- self.min(axis, keepdims=keepdims))
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis,
keepdims=True)]
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache['scale']
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
return self.to_value(attr, subfmt=None)
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError("Cannot convert TimeDelta with "
"undefined scale to any defined scale.")
else:
raise ScaleValueError("Cannot convert {} with scale "
"'{}' to scale '{}'"
.format(self.__class__.__name__,
self.scale, attr))
else:
# Should raise AttributeError
return self.__getattribute__(attr)
@override__dir__
def __dir__(self):
result = set(self.SCALES)
result.update(self.FORMATS)
return result
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError('Attribute shape must match or be '
'broadcastable to that of Time object. '
'Typically, give either a single value or '
'one for each time.')
return val
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
        if (self.scale is not None and self.scale not in other.SCALES
or other.scale is not None and other.scale not in self.SCALES):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError("Cannot compare {} instances with scales "
"'{}' and '{}'".format(self.__class__.__name__,
self.scale, other.scale))
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.)
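    # Illustrative comparisons (a sketch): the difference is evaluated in
    # self's scale, so instants are compared, not representations:
    #
    #     Time('2020-01-01') == Time('2020-01-01', scale='tai')   # False
    #     Time('2020-01-01') == '2020-01-01'   # True; the string is coerced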
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
class Time(TimeBase):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
See also: http://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
        If given as a tuple, it should be able to initialize
        an EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
If not given, assumed to be the center of the Earth for time scale
transformations to and from the solar-system barycenter.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
def __new__(cls, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if isinstance(val, Time):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(self, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
if not hasattr(self, 'location'):
self.location = None
if isinstance(val, Time):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(val, val2, format, scale, copy,
precision, in_subfmt, out_subfmt)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (self.location.size > 1
and self.location.shape != self.shape):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape,
subok=True)
except Exception as err:
raise ValueError('The location with shape {} cannot be '
'broadcast against time with shape {}. '
'Typically, either give a single location or '
'one for each time.'
.format(self.location.shape, self.shape)) from err
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object"""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif ((self_location is None and value.location is not None)
or (self_location is not None and value.location is None)):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError('cannot set to Time with different location: '
'expected location={} and '
'got location={}'
.format(self_location, value.location))
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(value, scale=self.scale, format=self.format,
location=self_location)
except Exception as err:
raise ValueError('cannot convert value to a compatible Time object: {}'
.format(err))
return value
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
            function, so its accuracy and precision are determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime : :class:`~astropy.time.Time`
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format='datetime', scale='utc')
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : str, sequence, or ndarray
Objects containing time data of type string
format_string : str
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ('U', 'S'):
err = "Expected type is string, a bytes-like object or a sequence"\
" of these. Got dtype '{}'".format(time_array.dtype.kind)
raise TypeError(err)
to_string = (str if time_array.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([time_array, None],
op_dtypes=[time_array.dtype, 'U30'])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}'\
.format(*time_tuple)
format = kwargs.pop('format', None)
out = cls(*iterator.operands[1:], format='isot', **kwargs)
if format is not None:
out.format = format
return out
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : str
Format definition of return string.
Returns
-------
formatted : str or numpy.array
String or numpy.array of strings formatted according to the given
format string.
"""
formatted_strings = []
for sk in self.replicate('iso')._time.str_kwargs():
date_tuple = date(sk['year'], sk['mon'], sk['day']).timetuple()
datetime_tuple = (sk['year'], sk['mon'], sk['day'],
sk['hour'], sk['min'], sk['sec'],
date_tuple[6], date_tuple[7], -1)
fmtd_str = format_spec
if '%f' in fmtd_str:
fmtd_str = fmtd_str.replace('%f', '{frac:0{precision}}'.format(
frac=sk['fracsec'], precision=self.precision))
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
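    # Illustrative use of ``strftime`` (a sketch):
    #
    #     t = Time('2018-01-01 10:30:00')
    #     t.strftime('%d %b %Y, %H:%M:%S')   # '01 Jan 2018, 10:30:00'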
def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
The time offset between the barycentre or Heliocentre and Earth,
in TDB seconds. Should be added to the original time to get the
time in the Solar system barycentre or the Heliocentre.
            The time conversion to BJD will then also include the relativistic correction.
"""
if kind.lower() not in ('barycentric', 'heliocentric'):
raise ValueError("'kind' parameter must be one of 'heliocentric' "
"or 'barycentric'")
if location is None:
if self.location is None:
raise ValueError('An EarthLocation needs to be set or passed '
'in to calculate bary- or heliocentric '
'corrections')
location = self.location
from astropy.coordinates import (UnitSphericalRepresentation, CartesianRepresentation,
HCRS, ICRS, GCRS, solar_system_ephemeris)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError("Supplied location does not have a valid `get_itrs` method")
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == 'heliocentric':
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation).
represent_as(CartesianRepresentation).xyz)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale='tdb')
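    # Illustrative use of ``light_travel_time`` (a sketch; the observatory
    # location and target here are hypothetical):
    #
    #     from astropy.coordinates import SkyCoord, EarthLocation
    #     loc = EarthLocation(lat=52.2 * u.deg, lon=0.1 * u.deg)
    #     target = SkyCoord('05h23m34.5s', '-69d45m22s')
    #     t = Time('2018-01-01 10:30:00', location=loc)
    #     t_bary = t.tdb + t.light_travel_time(target)   # barycentric time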
def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
        and is rigorously corrected for polar motion
        (except when ``longitude='tio'``).
""" # noqa
if isinstance(longitude, str) and longitude == 'tio':
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(longitude=longitude,
function=erfa.era00, scales=('ut1',),
include_tio=include_tio)
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
""" # noqa (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS.keys():
raise ValueError('The kind of sidereal time has to be {}'.format(
' or '.join(sorted(SIDEREAL_TIME_MODELS.keys()))))
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models.keys())[-1]
elif model.upper() not in available_models:
raise ValueError(
'Model {} not implemented for {} sidereal time; '
'available models are {}'
.format(model, kind, sorted(available_models.keys())))
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ('tio', 'greenwich'):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs['include_tio'] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()),
'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys()))
def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True):
"""Calculate a local sidereal time or Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance.
function : callable
The ERFA function to use.
scales : tuple of str
The time scales that the function requires on input.
include_tio : bool, optional
Whether to include the TIO locator corrected for polar motion.
Should be `False` for pre-2000 IAU models. Default: `True`.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time or Earth rotation angle, with units of hourangle.
""" # noqa
from astropy.coordinates import Longitude, EarthLocation
from astropy.coordinates.builtin_frames.utils import get_polar_motion
from astropy.coordinates.matrix_utilities import rotation_matrix
if longitude is None:
if self.location is None:
raise ValueError('No longitude is given but the location for '
'the Time object is not set.')
longitude = self.location.lon
elif isinstance(longitude, EarthLocation):
longitude = longitude.lon
else:
# Sanity check on input; default unit is degree.
longitude = Longitude(longitude, u.degree, copy=False)
theta = self._call_erfa(function, scales)
if include_tio:
# TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
# maybe possible to factor out to one or the other.
sp = self._call_erfa(erfa.sp00, ('tt',))
xp, yp = get_polar_motion(self)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (rotation_matrix(longitude, 'z')
@ rotation_matrix(-yp, 'x', unit=u.radian)
@ rotation_matrix(-xp, 'y', unit=u.radian)
@ rotation_matrix(theta + sp, 'z', unit=u.radian))
# Solve for angle.
angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian
else:
angle = longitude + (theta << u.radian)
return Longitude(angle, u.hourangle)
def _call_erfa(self, function, scales):
# TODO: allow erfa functions to be used on Time with __array_ufunc__.
erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
for scale in scales
for jd_part in ('jd1', 'jd2_filled')]
result = function(*erfa_parameters)
if self.masked:
result[self.mask] = np.nan
return result
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : `~astropy.utils.iers.IERS`, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. Default: `~astropy.utils.iers.earth_orientation_table`
(which in turn defaults to the combined version provided by
`~astropy.utils.iers.IERS_Auto`).
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status=True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA
>>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate it from the
IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, '_delta_ut1_utc'):
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case.
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = 'utc'
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == 'ut1':
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, '_delta_tdb_tt'):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ('tt', 'tdb'):
raise ValueError('Accessing the delta_tdb_tt attribute '
'is only possible for TT or TDB time '
'scales')
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
# Assume geocentric.
self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0., 0., 0.)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1, jd2, ut, lon.to_value(u.radian),
rxy.to_value(u.km), z.to_value(u.km))
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = not isinstance(other, Time)
if other_is_delta: # T - Tdelta
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot subtract Time and TimeDelta instances "
"with scales '{}' and '{}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError("Cannot subtract Time instances "
"with scales '{}' and '{}'"
.format(self.scale, other.scale))
self_time = (self._time if self.scale in TIME_DELTA_SCALES
else self.tai._time)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(self_time.jd1, self_time.jd2, format='jd',
scale=self_time.scale)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
# T + Tdelta = T
# T + T = error
if isinstance(other, Time):
raise OperandTypeError(self, other, '+')
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot add Time and TimeDelta instances "
"with scales '{}' and '{}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
# Reverse addition is possible: <something-Tdelta-ish> + T
# but there is no case of <something> - T, so no __rsub__.
def __radd__(self, other):
return self.__add__(other)
def __array_function__(self, function, types, args, kwargs):
"""
Wrap numpy functions.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
"""
if function in CUSTOM_FUNCTIONS:
f = CUSTOM_FUNCTIONS[function]
return f(*args, **kwargs)
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
return super().__array_function__(function, types, args, kwargs)
def to_datetime(self, timezone=None):
# TODO: this could likely go through to_value, as long as that
# had an **kwargs part that was just passed on to _time.
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta"""
pass
class TimeDelta(TimeBase):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
See also:
- https://docs.astropy.org/en/stable/time/
- https://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s). For numerical inputs without units,
"jd" is assumed and values are interpreted as days.
A deprecation warning is raised in this case. To avoid the warning,
either specify the format or add units to the input values.
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
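Examples
--------
An illustrative construction (a sketch; exact reprs may vary)::
>>> from astropy.time import TimeDelta
>>> import astropy.units as u
>>> dt = TimeDelta(3600. * u.s)
>>> dt.sec  # doctest: +SKIP
3600.0
>>> dt.to(u.hr)  # doctest: +SKIP
<Quantity 1. h>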
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __new__(cls, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if isinstance(val, TimeDelta):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
format = format or self._get_format(val)
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
@staticmethod
def _get_format(val):
if isinstance(val, timedelta):
return 'datetime'
if getattr(val, 'unit', None) is None:
warn('Numerical value without unit or explicit format passed to'
' TimeDelta, assuming days', TimeDeltaMissingUnitWarning)
return 'jd'
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {!r} is not in the allowed scales {}"
.format(scale, sorted(self.SCALES)))
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1, jd2 + offset2, scale,
self.precision, self.in_subfmt,
self.out_subfmt, from_jd=True)
def _add_sub(self, other, op):
"""Perform common elements of addition / subtraction for two delta times"""
# If not a TimeDelta then see if it can be turned into a TimeDelta.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if (self.scale is not None and self.scale not in other.SCALES
or other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot add TimeDelta instances with scales "
"'{}' and '{}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = op(self._time.jd1, other._time.jd1)
jd2 = op(self._time.jd2, other._time.jd2)
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __add__(self, other):
# If other is a Time then use Time.__add__ to do the calculation.
if isinstance(other, Time):
return other.__add__(self)
return self._add_sub(other, operator.add)
def __sub__(self, other):
# TimeDelta - Time is an error
if isinstance(other, Time):
raise OperandTypeError(self, other, '-')
return self._add_sub(other, operator.sub)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, '*')
elif ((isinstance(other, u.UnitBase)
and other == u.dimensionless_unscaled)
or (isinstance(other, str) and other == '')):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just multiply it in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if ((isinstance(other, u.UnitBase)
and other == u.dimensionless_unscaled)
or (isinstance(other, str) and other == '')):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide by it.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See also
--------
to_value : get the numerical value in a given unit.
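Examples
--------
For instance (a sketch)::
>>> TimeDelta(86400., format='sec').to(u.day)  # doctest: +SKIP
<Quantity 1. d>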
"""
return u.Quantity(self._time.jd1 + self._time.jd2,
u.day).to(unit, equivalencies=equivalencies)
def to_value(self, *args, **kwargs):
"""Get time delta values expressed in specified output format or unit.
This method is flexible and handles both conversion to a specified
``TimeDelta`` format / sub-format AND conversion to a specified unit.
If positional argument(s) are provided then the first one is checked
to see if it is a valid ``TimeDelta`` format, and next it is checked
to see if it is a valid unit or unit string.
To convert to a ``TimeDelta`` format and optional sub-format the options
are::
tm = TimeDelta(1.0 * u.s)
tm.to_value('jd') # equivalent of tm.jd
tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object
tm.to_value('jd', subfmt='decimal')
tm.to_value(format='jd', subfmt='decimal')
To convert to a unit with optional equivalencies, the options are::
tm.to_value('hr') # convert to u.hr (hours)
tm.to_value('hr', []) # specify equivalencies as a positional arg
tm.to_value('hr', equivalencies=[])
tm.to_value(unit='hr', equivalencies=[])
The built-in `~astropy.time.TimeDelta` options for ``format`` are:
{'jd', 'sec', 'datetime'}.
For the two numerical formats 'jd' and 'sec', the available ``subfmt``
options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with the
enhancement depending on platform), and 'decimal' instances of
:class:`decimal.Decimal` for full precision. For the 'str' and 'bytes'
sub-formats, the number of digits is also chosen such that time values
are represented accurately. Default: as set by ``out_subfmt`` (which by
default picks the first available for a given format, i.e., 'float').
Parameters
----------
format : str, optional
The format in which one wants the `~astropy.time.TimeDelta` values.
Default: the current format.
subfmt : str, optional
Possible sub-format in which the values should be given. Default: as
set by ``out_subfmt`` (which by default picks the first available
for a given format, i.e., 'float' or 'date_hms').
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally or
within a context.
Returns
-------
value : ndarray or scalar
The value in the format or units specified.
See also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
if not (args or kwargs):
raise TypeError('to_value() missing required format or unit argument')
# TODO: maybe allow 'subfmt' also for units, keeping full precision
# (effectively, by doing the reverse of quantity_day_frac)?
# This way, only equivalencies could lead to possible precision loss.
if ('format' in kwargs
or (args != () and (args[0] is None or args[0] in self.FORMATS))):
# Super-class will error with duplicate arguments, etc.
return super().to_value(*args, **kwargs)
# With positional arguments, we try parsing the first one as a unit,
# so that on failure we can give a more informative exception.
if args:
try:
unit = u.Unit(args[0])
except ValueError as exc:
raise ValueError("first argument is not one of the known "
"formats ({}) and failed to parse as a unit."
.format(list(self.FORMATS))) from exc
args = (unit,) + args[1:]
return u.Quantity(self._time.jd1 + self._time.jd2,
u.day).to_value(*args, **kwargs)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object"""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError('cannot convert value to a compatible TimeDelta '
'object: {}'.format(err))
return value
def isclose(self, other, atol=None, rtol=0.0):
"""Returns a boolean or boolean array where two TimeDelta objects are
element-wise equal within a time tolerance.
This effectively evaluates the expression below::
abs(self - other) <= atol + rtol * abs(other)
Parameters
----------
other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Quantity or TimeDelta object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is one bit in the 128-bit JD time representation,
equivalent to about 20 picoseconds.
rtol : float
Relative tolerance for equality
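Examples
--------
A sketch of typical use::
>>> dt = TimeDelta(1. * u.day)
>>> dt.isclose(24. * u.hr)  # doctest: +SKIP
True
>>> dt.isclose(24. * u.hr + 1. * u.ms, atol=0.5 * u.ms)  # doctest: +SKIP
False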
"""
try:
other_day = other.to_value(u.day)
except Exception as err:
raise TypeError(f"'other' argument must support conversion to days: {err}")
if atol is None:
atol = np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError("'atol' argument must be a Quantity or TimeDelta instance, got "
f'{atol.__class__.__name__} instead')
return np.isclose(self.to_value(u.day), other_day,
rtol=rtol, atol=atol.to_value(u.day))
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in 'OSUMaV':
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the returned ``mask``
is `False`.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask : bool ndarray or False
Logical-or of the input masks, or `False` if neither input is masked.
val, val2 : ndarray
Filled (unmasked) versions of the inputs.
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = '' if op is None else f' for {op}'
super().__init__(
"Unsupported operand type(s){}: "
"'{}' and '{}'".format(op_string,
left.__class__.__name__,
right.__class__.__name__))
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
into warnings.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses the list defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
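Examples
--------
A sketch with a hypothetical local table path (the return value depends
on how up to date the built-in ERFA table is)::
>>> update_leap_seconds(['/path/to/leap-seconds.list'])  # doctest: +SKIP
0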
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn("leap-second auto-update failed due to the following "
f"exception: {exc!r}", AstropyWarning)
return 0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Time utilities.
In particular, routines to do basic arithmetic on numbers represented by two
doubles, using the procedure of Shewchuk, 1997, Discrete & Computational
Geometry 18(3):305-363 -- http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Furthermore, some helper routines to turn strings and other types of
objects into two values, and vice versa.
"""
import decimal
import numpy as np
import astropy.units as u
def day_frac(val1, val2, factor=None, divisor=None):
"""Return the sum of ``val1`` and ``val2`` as two float64s.
The returned floats are an integer part and the fractional remainder,
with the latter guaranteed to be within -0.5 and 0.5 (inclusive on
either side, as the integer is rounded to even).
The arithmetic is all done with exact floating point operations so no
precision is lost to rounding error. It is assumed the sum is less
than about 1e16, otherwise the remainder will be greater than 1.0.
Parameters
----------
val1, val2 : array of float
Values to be summed.
factor : float, optional
If given, multiply the sum by it.
divisor : float, optional
If given, divide the sum by it.
Returns
-------
day, frac : float64
Integer and fractional part of val1 + val2.
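Examples
--------
For instance (a sketch; outputs shown as plain floats)::
>>> day_frac(2450000.5, 0.75)  # doctest: +SKIP
(2450001.0, 0.25)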
"""
# Add val1 and val2 exactly, returning the result as two float64s.
# The first is the approximate sum (with some floating point error)
# and the second is the error of the float64 sum.
sum12, err12 = two_sum(val1, val2)
if factor is not None:
sum12, carry = two_product(sum12, factor)
carry += err12 * factor
sum12, err12 = two_sum(sum12, carry)
if divisor is not None:
q1 = sum12 / divisor
p1, p2 = two_product(q1, divisor)
d1, d2 = two_sum(sum12, -p1)
d2 += err12
d2 -= p2
q2 = (d1 + d2) / divisor # 3-part float fine here; nothing can be lost
sum12, err12 = two_sum(q1, q2)
# get integer fraction
day = np.round(sum12)
extra, frac = two_sum(sum12, -day)
frac += extra + err12
# Our fraction can now have gotten >0.5 or <-0.5, which means we would
# lose one bit of precision. So, correct for that.
excess = np.round(frac)
day += excess
extra, frac = two_sum(sum12, -day)
frac += extra + err12
return day, frac
def quantity_day_frac(val1, val2=None):
"""Like ``day_frac``, but for quantities with units of time.
The quantities are separately converted to days. Here, we need to take
care with the conversion since while the routines here can do accurate
multiplication, the conversion factor itself may not be accurate. For
instance, if the quantity is in seconds, the conversion factor is
1./86400., which is not exactly representable as a float.
To work around this, for conversion factors less than unity, rather than
multiply by that possibly inaccurate factor, the value is divided by the
conversion factor of a day to that unit (i.e., by 86400. for seconds). For
conversion factors larger than 1, such as 365.25 for years, we do just
multiply. With this scheme, one has precise conversion factors for all
regular time units that astropy defines. Note, however, that it does not
necessarily work for all custom time units, and cannot work when conversion
to time is via an equivalency. For those cases, one remains limited by the
fact that Quantity calculations are done in double precision, not in
quadruple precision as for time.
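For example, a day's worth of seconds converts exactly (a sketch)::
>>> quantity_day_frac(86400. * u.s)  # doctest: +SKIP
(1.0, 0.0)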
"""
if val2 is not None:
res11, res12 = quantity_day_frac(val1)
res21, res22 = quantity_day_frac(val2)
# This summation can at most lose 1 ULP in the second number.
return res11 + res21, res12 + res22
try:
factor = val1.unit.to(u.day)
except Exception:
# Not a simple scaling, so cannot do the full-precision one.
# But at least try normal conversion, since equivalencies may be set.
return val1.to_value(u.day), 0.
if factor == 1.:
return day_frac(val1.value, 0.)
if factor > 1:
return day_frac(val1.value, 0., factor=factor)
else:
divisor = u.day.to(val1.unit)
return day_frac(val1.value, 0., divisor=divisor)
def two_sum(a, b):
"""
Add ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate sum (with some floating point error)
and the second is the error of the float64 sum.
Using the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
sum, err : float64
Approximate sum of a + b and the exact floating point error
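Examples
--------
For instance, recovering the part of a sum that is lost to rounding::
>>> two_sum(1e16, 1.0)  # doctest: +SKIP
(1e+16, 1.0)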
"""
x = a + b
eb = x - a # bvirtual in Shewchuk
ea = x - eb # avirtual in Shewchuk
eb = b - eb # broundoff in Shewchuk
ea = a - ea # aroundoff in Shewchuk
return x, ea + eb
def two_product(a, b):
"""
Multiply ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate product (with some floating point error)
and the second is the error of the float64 product.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
prod, err : float64
Approximate product a * b and the exact floating point error
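Examples
--------
For instance, recovering the rounding error of an inexact product::
>>> two_product(0.1, 10.0)  # doctest: +SKIP
(1.0, 5.551115123125783e-17)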
"""
x = a * b
ah, al = split(a)
bh, bl = split(b)
y1 = ah * bh
y = x - y1
y2 = al * bh
y -= y2
y3 = ah * bl
y -= y3
y4 = al * bl
y = y4 - y
return x, y
def split(a):
"""
Split float64 in two aligned parts.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
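For example, a value needing fewer than 27 significand bits splits
losslessly into itself and zero (a sketch)::
>>> split(7.0)  # doctest: +SKIP
(7.0, 0.0)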
"""
c = 134217729. * a # 2**27+1.
abig = c - a
ah = c - abig
al = a - ah
return ah, al
_enough_decimal_places = 34 # to represent two doubles
def longdouble_to_twoval(val1, val2=None):
if val2 is None:
val2 = val1.dtype.type(0.)
else:
best_type = np.result_type(val1.dtype, val2.dtype)
val1 = val1.astype(best_type, copy=False)
val2 = val2.astype(best_type, copy=False)
# day_frac is independent of dtype, as long as the dtypes
# are the same and no factor or divisor is given.
i, f = day_frac(val1, val2)
return i.astype(float, copy=False), f.astype(float, copy=False)
def decimal_to_twoval1(val1, val2=None):
with decimal.localcontext() as ctx:
ctx.prec = _enough_decimal_places
d = decimal.Decimal(val1)
i = round(d)
f = d - i
return float(i), float(f)
def bytes_to_twoval1(val1, val2=None):
return decimal_to_twoval1(val1.decode('ascii'))
def twoval_to_longdouble(val1, val2):
return val1.astype(np.longdouble) + val2.astype(np.longdouble)
def twoval_to_decimal1(val1, val2):
with decimal.localcontext() as ctx:
ctx.prec = _enough_decimal_places
return decimal.Decimal(val1) + decimal.Decimal(val2)
def twoval_to_string1(val1, val2, fmt):
if val2 == 0.:
# For some formats, only a single float is really used.
# For those, let numpy take care of correct number of digits.
return str(val1)
# Strip only trailing zeros, so a leading zero (as in '0.5') is kept.
result = format(twoval_to_decimal1(val1, val2), fmt).rstrip('0')
if result[-1] == '.':
result += '0'
return result
def twoval_to_bytes1(val1, val2, fmt):
return twoval_to_string1(val1, val2, fmt).encode('ascii')
decimal_to_twoval = np.vectorize(decimal_to_twoval1)
bytes_to_twoval = np.vectorize(bytes_to_twoval1)
twoval_to_decimal = np.vectorize(twoval_to_decimal1)
twoval_to_string = np.vectorize(twoval_to_string1, excluded='fmt')
twoval_to_bytes = np.vectorize(twoval_to_bytes1, excluded='fmt')
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import time
import re
import datetime
import warnings
from decimal import Decimal
from collections import OrderedDict, defaultdict
import numpy as np
import erfa
from astropy.utils.decorators import lazyproperty, classproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
import astropy.units as u
from . import _parse_times
from . import utils
from .utils import day_frac, quantity_day_frac, two_sum, two_product
from . import conf
__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
'TimeUnixTai', 'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
'TimeEpochDateString', 'TimeBesselianEpochString',
'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64', 'TimeYMDHMS',
'TimeNumeric', 'TimeDeltaNumeric']
__doctest_skip__ = ['TimePlotDate']
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
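For example, an input sub-format string of ``'%Y-%m-%d'`` would become the
compiled pattern ``(?P<year>\d\d\d\d)-(?P<mon>\d{1,2})-(?P<mday>\d{1,2})$``.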
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
if isinstance(subfmt_in, str):
for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
('%m', r'(?P<mon>\d{1,2})'),
('%d', r'(?P<mday>\d{1,2})'),
('%H', r'(?P<hour>\d{1,2})'),
('%M', r'(?P<min>\d{1,2})'),
('%S', r'(?P<sec>\d{1,2})')):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if '%' not in subfmt_in:
subfmt_tuple = (subfmt_tuple[0],
re.compile(subfmt_in + '$'),
subfmt_tuple[2])
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
class TimeFormat:
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
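Examples
--------
Subclasses that define a ``name`` class attribute register themselves
automatically (a sketch; the format name here is hypothetical)::
>>> class TimeMyJD(TimeJD):  # doctest: +SKIP
...     name = 'my_jd'
>>> 'my_jd' in TIME_FORMATS  # doctest: +SKIP
True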
"""
_default_scale = 'utc' # As of astropy 0.4
subfmts = ()
_registry = TIME_FORMATS
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
self._jd1, self._jd2 = None, None
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __init_subclass__(cls, **kwargs):
# Register time formats that define a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if 'name' in cls.__dict__ and cls.name != 'astropy_time':
# FIXME: check here that we're not introducing a collision with
# an existing method or attribute; problem is it could be either
# astropy.time.Time or astropy.time.TimeDelta, and at the point
# where this is run neither of those classes have necessarily been
# constructed yet.
if 'value' in cls.__dict__ and not hasattr(cls.value, "fget"):
raise ValueError("If defined, 'value' must be a property")
cls._registry[cls.name] = cls
# If this class defines its own subfmts, preprocess the definitions.
if 'subfmts' in cls.__dict__:
cls.subfmts = _regexify_subfmts(cls.subfmts)
return super().__init_subclass__(**kwargs)
@classmethod
def _get_allowed_subfmt(cls, subfmt):
"""Get an allowed subfmt for this class, either the input ``subfmt``
if this is valid or '*' as a default. This method gets used in situations
where the format of an existing Time object is changing and so the
out_ or in_subfmt may need to be coerced to the default '*' if that
``subfmt`` is no longer valid.
"""
try:
cls._select_subfmts(subfmt)
except ValueError:
subfmt = '*'
return subfmt
@property
def in_subfmt(self):
return self._in_subfmt
@in_subfmt.setter
def in_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._in_subfmt = subfmt
@property
def out_subfmt(self):
return self._out_subfmt
@out_subfmt.setter
def out_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._out_subfmt = subfmt
@property
def jd1(self):
return self._jd1
@jd1.setter
def jd1(self, jd1):
self._jd1 = _validate_jd_for_storage(jd1)
if self._jd2 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
@property
def jd2(self):
return self._jd2
@jd2.setter
def jd2(self, jd2):
self._jd2 = _validate_jd_for_storage(jd2)
if self._jd1 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale"""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if 'mask' not in self.cache:
self.cache['mask'] = np.isnan(self.jd2)
if self.cache['mask'].shape:
self.cache['mask'].flags.writeable = False
return self.cache['mask']
@property
def masked(self):
if 'masked' not in self.cache:
self.cache['masked'] = bool(np.any(self.mask))
return self.cache['masked']
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, val):
# Verify that precision is an int in the range 0-9 (inclusive).
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError('precision attribute must be an int between '
'0 and 9')
self._precision = val
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
# val1 cannot contain nan, but val2 can contain nan
isfinite1 = np.isfinite(val1)
if val1.size > 1: # Calling .all() on a scalar is surprisingly slow
isfinite1 = isfinite1.all() # Note: arr.all() about 3x faster than np.all(arr)
elif val1.size == 0:
isfinite1 = False
ok1 = ((val1.dtype.kind == 'f' and val1.dtype.itemsize >= 8
and isfinite1) or val1.size == 0)
ok2 = val2 is None or (
val2.dtype.kind == 'f' and val2.dtype.itemsize >= 8
and not np.any(np.isinf(val2))) or val2.size == 0
if not (ok1 and ok2):
raise TypeError('Input values for {} class must be finite doubles'
.format(self.name))
if getattr(val1, 'unit', None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
# seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances.")
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1. / getattr(self, 'unit', 1.)
if factor != 1.:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, 'unit', None) is not None:
raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.array(0, dtype=val1.dtype)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
required time scale for this format. In this case, if a scale value was
provided, it needs to match the class default; otherwise the class
default is returned.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_SCALES))
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2 in specified
``out_subfmt``.
This is the base method that ignores ``parent`` and uses the ``value``
property to compute the output. This is done by temporarily setting
``self.out_subfmt`` and calling ``self.value``. This is required for
legacy Format subclasses prior to astropy 4.0. New code should instead
implement the value functionality in ``to_value()`` and then make the
``value`` property be a simple call to ``self.to_value()``.
Parameters
----------
parent : object
Parent `~astropy.time.Time` object associated with this
`~astropy.time.TimeFormat` object
out_subfmt : str or None
Output subformat (use existing self.out_subfmt if `None`)
Returns
-------
value : numpy.array, numpy.ma.array
Array or masked array of formatted time representation values
"""
# Get value via ``value`` property, overriding out_subfmt temporarily if needed.
if out_subfmt is not None:
out_subfmt_orig = self.out_subfmt
try:
self.out_subfmt = out_subfmt
value = self.value
finally:
self.out_subfmt = out_subfmt_orig
else:
value = self.value
return self.mask_if_needed(value)
@property
def value(self):
raise NotImplementedError
@classmethod
def _select_subfmts(cls, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
If no subformat matches pattern then a ValueError is raised. A special
case is a format with no allowed subformats, i.e. subfmts=(), and
pattern='*'. This is OK and happens when this method is used for
validation of an out_subfmt.
"""
if not isinstance(pattern, str):
raise ValueError('subfmt attribute must be a string')
elif pattern == '*':
return cls.subfmts
subfmts = [x for x in cls.subfmts if fnmatch.fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
if len(cls.subfmts) == 0:
raise ValueError(f'subformat not allowed for format {cls.name}')
else:
subfmt_names = [x[0] for x in cls.subfmts]
raise ValueError(f'subformat {pattern!r} must match one of '
f'{subfmt_names} for format {cls.name}')
return subfmts
class TimeNumeric(TimeFormat):
subfmts = (
('float', np.float64, None, np.add),
('long', np.longdouble, utils.longdouble_to_twoval,
utils.twoval_to_longdouble),
('decimal', np.object_, utils.decimal_to_twoval,
utils.twoval_to_decimal),
('str', np.str_, utils.decimal_to_twoval, utils.twoval_to_string),
('bytes', np.bytes_, utils.bytes_to_twoval, utils.twoval_to_bytes),
)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
# Save original state of val2 because the super()._check_val_type below
# may change val2 from None to np.array(0). The value is saved in order
# to prevent a useless and slow call to np.result_type() below in the
# most common use-case of providing only val1.
orig_val2_is_none = val2 is None
if val1.dtype.kind == 'f':
val1, val2 = super()._check_val_type(val1, val2)
elif (not orig_val2_is_none
or not (val1.dtype.kind in 'US'
or (val1.dtype.kind == 'O'
and all(isinstance(v, Decimal) for v in val1.flat)))):
raise TypeError(
'for {} class, input should be doubles, string, or Decimal, '
'and second values are only allowed for doubles.'
.format(self.name))
val_dtype = (val1.dtype if orig_val2_is_none else
np.result_type(val1.dtype, val2.dtype))
subfmts = self._select_subfmts(self.in_subfmt)
for subfmt, dtype, convert, _ in subfmts:
if np.issubdtype(val_dtype, dtype):
break
else:
raise ValueError('input type not among selected sub-formats.')
if convert is not None:
try:
val1, val2 = convert(val1, val2)
except Exception:
raise TypeError(
'for {} class, input should be (long) doubles, string, '
'or Decimal, and second values are only allowed for '
'(long) doubles.'.format(self.name))
return val1, val2
def to_value(self, jd1=None, jd2=None, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2.
Subclasses that require ``parent`` or to adjust the jds should
override this method.
"""
# TODO: do this in __init_subclass__?
if self.__class__.value.fget is not self.__class__.to_value:
return self.value
if jd1 is None:
jd1 = self.jd1
if jd2 is None:
jd2 = self.jd2
if out_subfmt is None:
out_subfmt = self.out_subfmt
subfmt = self._select_subfmts(out_subfmt)[0]
kwargs = {}
if subfmt[0] in ('str', 'bytes'):
unit = getattr(self, 'unit', 1)
digits = int(np.ceil(np.log10(unit / np.finfo(float).eps)))
# TODO: allow a way to override the format.
kwargs['fmt'] = f'.{digits}f'
value = subfmt[3](jd1, jd2, **kwargs)
return self.mask_if_needed(value)
value = property(to_value)
class TimeJD(TimeNumeric):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = 'jd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
class TimeMJD(TimeNumeric):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = 'mjd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h).
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd1 = self.jd1 - erfa.DJM0 # This cannot lose precision.
jd2 = self.jd2
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDecimalYear(TimeNumeric):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = 'decimalyear'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
def to_value(self, **kwargs):
scale = self.scale.upper().encode('ascii')
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0
self.jd1, self.jd2_filled)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
# Trying to be precise, but more than float64 not useful.
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return super().to_value(jd1=decimalyear, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
class TimeFromEpoch(TimeNumeric):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
@classproperty(lazy=True)
def _epoch(cls):
# Ideally we would use `def epoch(cls)` here and not have the instance
# property below. However, this breaks the sphinx API docs generation
# in a way that was not resolved. See #10406 for details.
return Time(cls.epoch_val, cls.epoch_val2, scale=cls.epoch_scale,
format=cls.epoch_format)
@property
def epoch(self):
"""Reference epoch time from which the time interval is measured"""
return self._epoch
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1. / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# For the usual case that scale is the same as epoch_scale, we only need
# to ensure that abs(jd2) <= 0.5. Since abs(self.epoch.jd2) <= 0.5 and
# abs(frac) <= 0.5, we can do simple (fast) checks and arithmetic here
# without another call to day_frac(). Note also that `round(jd2.item())`
        # is about 10x faster than `np.round(jd2)` for a scalar.
if self.epoch.scale == self.scale:
jd1_extra = np.round(jd2) if jd2.shape else round(jd2.item())
jd1 += jd1_extra
jd2 -= jd1_extra
self.jd1, self.jd2 = jd1, jd2
return
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
format='jd'), self.scale)
except Exception as err:
            raise ScaleValueError("Cannot convert from '{}' epoch scale '{}' "
                                  "to specified scale '{}', got error:\n{}"
                                  .format(self.name, self.epoch_scale,
                                          self.scale, err)) from err
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None, **kwargs):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError('cannot compute value without parent Time object')
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
                raise ScaleValueError("Cannot convert from '{}' epoch scale '{}' "
                                      "to specified scale '{}', got error:\n{}"
                                      .format(self.name, self.epoch_scale,
                                              self.scale, err)) from err
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
# This factor is guaranteed to be exactly representable, which
# means time_from_epoch1 is calculated exactly.
factor = 1. / self.unit
time_from_epoch1 = (jd1 - self.epoch.jd1) * factor
time_from_epoch2 = (jd2 - self.epoch.jd2) * factor
return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
class TimeUnix(TimeFromEpoch):
"""
Unix time (UTC): seconds from 1970-01-01 00:00:00 UTC, ignoring leap seconds.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = 'unix'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1970-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'iso'
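# Illustrative usage sketch (value taken from the docstring above):
#
#   >>> from astropy.time import Time
#   >>> Time(946684800.0, format='unix').iso
#   '2000-01-01 00:00:00.000'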
class TimeUnixTai(TimeUnix):
"""
Unix time (TAI): SI seconds elapsed since 1970-01-01 00:00:00 TAI (see caveats).
This will generally differ from standard (UTC) Unix time by the cumulative
integral number of leap seconds introduced into UTC since 1972-01-01 UTC
plus the initial offset of 10 seconds at that date.
This convention matches the definition of linux CLOCK_TAI
(https://www.cl.cam.ac.uk/~mgk25/posix-clocks.html),
and the Precision Time Protocol
(https://en.wikipedia.org/wiki/Precision_Time_Protocol), which
is also used by the White Rabbit protocol in High Energy Physics:
https://white-rabbit.web.cern.ch.
Caveats:
- Before 1972, fractional adjustments to UTC were made, so the difference
between ``unix`` and ``unix_tai`` time is no longer an integer.
- Because of the fractional adjustments, to be very precise, ``unix_tai``
is the number of seconds since ``1970-01-01 00:00:00 TAI`` or equivalently
``1969-12-31 23:59:51.999918 UTC``. The difference between TAI and UTC
at that epoch was 8.000082 sec.
- On the day of a positive leap second the difference between ``unix`` and
``unix_tai`` times increases linearly through the day by 1.0. See also the
documentation for the `~astropy.time.TimeUnix` class.
- Negative leap seconds are possible, though none have been needed to date.
Examples
--------
>>> # get the current offset between TAI and UTC
>>> from astropy.time import Time
>>> t = Time('2020-01-01', scale='utc')
>>> t.unix_tai - t.unix
37.0
>>> # Before 1972, the offset between TAI and UTC was not integer
>>> t = Time('1970-01-01', scale='utc')
>>> t.unix_tai - t.unix # doctest: +FLOAT_CMP
8.000082
>>> # Initial offset of 10 seconds in 1972
>>> t = Time('1972-01-01', scale='utc')
>>> t.unix_tai - t.unix
10.0
"""
name = 'unix_tai'
epoch_val = '1970-01-01 00:00:00'
epoch_scale = 'tai'
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = 'cxcsec'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1998-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'tt'
epoch_format = 'iso'
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
=====
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
    considered as a time scale which runs a fixed 19 seconds behind TAI
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
"""
name = 'gps'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1980-01-06 00:00:19'
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
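# Illustrative sketch: by construction of ``epoch_val`` above, the TAI instant
# 1980-01-06 00:00:19 corresponds to GPS time zero:
#
#   >>> from astropy.time import Time
#   >>> Time('1980-01-06 00:00:19', scale='tai').gps
#   0.0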
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = 'plot_date'
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'jd'
@lazyproperty
def epoch(self):
"""Reference epoch time from which the time interval is measured"""
try:
# Matplotlib >= 3.3 has a get_epoch() function
from matplotlib.dates import get_epoch
except ImportError:
# If no get_epoch() then the epoch is '0001-01-01'
_epoch = self._epoch
else:
# Get the matplotlib date epoch as an ISOT string in UTC
epoch_utc = get_epoch()
from erfa import ErfaWarning
with warnings.catch_warnings():
# Catch possible dubious year warnings from erfa
warnings.filterwarnings('ignore', category=ErfaWarning)
_epoch = Time(epoch_utc, scale='utc', format='isot')
_epoch.format = 'jd'
return _epoch
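# Illustrative sketch (assumes the classic matplotlib epoch of 0001-01-01;
# with matplotlib >= 3.3 the value tracks ``get_epoch()`` instead, hence the
# skip marker):
#
#   >>> from astropy.time import Time
#   >>> Time('2000-01-01 00:00:00', scale='utc').plot_date  # doctest: +SKIP
#   730120.0003703703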
class TimeStardate(TimeFromEpoch):
"""
Stardate: date units from 2318-07-05 12:00:00 UTC.
For example, stardate 41153.7 is 00:52 on April 30, 2363.
See http://trekguide.com/Stardates.htm#TNG for calculations and reference points
"""
name = 'stardate'
unit = 0.397766856 # Stardate units per day
epoch_val = '2318-07-05 11:00:00' # Date and time of stardate 00000.00
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = 'astropy_time'
def __new__(cls, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
for val in val1.flat)):
raise TypeError('Input values for {} class must all be same '
'astropy Time type.'.format(cls.name))
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
# Collect individual location values and merge into a single location.
if any(tm.location is not None for tm in val1):
if any(tm.location is None for tm in val1):
raise ValueError('cannot concatenate times unless all locations '
'are set or no locations are set')
locations = []
for tm in val1:
location = np.broadcast_to(tm.location, tm._time.jd1.shape,
subok=True)
locations.append(np.atleast_1d(location))
location = np.concatenate(locations)
else:
location = None
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
location = val1_0.location
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
from_jd=True)
# Make a temporary hidden attribute to transfer location back to the
# parent Time object where it needs to live.
self._location = location
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = 'datetime'
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime objects'.format(self.name))
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2"""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None, out_subfmt=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None}, optional
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if out_subfmt is not None:
# Out_subfmt not allowed for this format, so raise the standard
# exception by trying to validate the value.
self._select_subfmts(out_subfmt)
if timezone is not None:
if self._scale != 'utc':
raise ScaleValueError("scale is {}, must be 'utc' when timezone "
"is supplied.".format(self._scale))
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec
self.jd1, self.jd2_filled)
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=7*[None] + [object])
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError('Time {} is within a leap second but datetime '
'does not support leap seconds'
.format((iy, im, id, ihr, imin, isec, ifracsec)))
if timezone is not None:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,
tzinfo=TimezoneInfo()).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimeYMDHMS(TimeUnique):
"""
ymdhms: A Time format to represent Time as year, month, day, hour,
minute, second (thus the name ymdhms).
Acceptable inputs must have keys or column names in the "YMDHMS" set of
    ``year``, ``month``, ``day``, ``hour``, ``minute``, ``second``:
- Dict with keys in the YMDHMS set
- NumPy structured array, record array or astropy Table, or single row
of those types, with column names in the YMDHMS set
One can supply a subset of the YMDHMS values, for instance only 'year',
'month', and 'day'. Inputs have the following defaults::
'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0
When the input is supplied as a ``dict`` then each value can be either a
scalar value or an array. The values will be broadcast to a common shape.
Example::
>>> from astropy.time import Time
>>> t = Time({'year': 2015, 'month': 2, 'day': 3,
... 'hour': 12, 'minute': 13, 'second': 14.567},
... scale='utc')
>>> t.iso
'2015-02-03 12:13:14.567'
>>> t.ymdhms.year
2015
"""
name = 'ymdhms'
def _check_val_type(self, val1, val2):
"""
This checks inputs for the YMDHMS format.
        It is a bit more complex than most format checkers because of the flexible
input that is allowed. Also, it actually coerces ``val1`` into an appropriate
dict of ndarrays that can be used easily by ``set_jds()``. This is useful
because it makes it easy to get default values in that routine.
Parameters
----------
val1 : ndarray or None
val2 : ndarray or None
Returns
-------
val1_as_dict, val2 : val1 as dict or None, val2 is always None
"""
if val2 is not None:
raise ValueError('val2 must be None for ymdhms format')
ymdhms = ['year', 'month', 'day', 'hour', 'minute', 'second']
if val1.dtype.names:
# Convert to a dict of ndarray
val1_as_dict = {name: val1[name] for name in val1.dtype.names}
elif val1.shape == (0,):
# Input was empty list [], so set to None and set_jds will handle this
return None, None
elif (val1.dtype.kind == 'O'
and val1.shape == ()
and isinstance(val1.item(), dict)):
# Code gets here for input as a dict. The dict input
# can be either scalar values or N-d arrays.
# Extract the item (which is a dict) and broadcast values to the
# same shape here.
names = val1.item().keys()
values = val1.item().values()
val1_as_dict = {name: value for name, value
in zip(names, np.broadcast_arrays(*values))}
else:
raise ValueError('input must be dict or table-like')
# Check that the key names now are good.
names = val1_as_dict.keys()
required_names = ymdhms[:len(names)]
def comma_repr(vals):
return ', '.join(repr(val) for val in vals)
bad_names = set(names) - set(ymdhms)
if bad_names:
raise ValueError(f'{comma_repr(bad_names)} not allowed as YMDHMS key name(s)')
if set(names) != set(required_names):
raise ValueError(f'for {len(names)} input key names '
f'you must supply {comma_repr(required_names)}')
return val1_as_dict, val2
def set_jds(self, val1, val2):
if val1 is None:
# Input was empty list []
jd1 = np.array([], dtype=np.float64)
jd2 = np.array([], dtype=np.float64)
else:
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
val1['year'],
val1.get('month', 1),
val1.get('day', 1),
val1.get('hour', 0),
val1.get('minute', 0),
val1.get('second', 0))
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9,
self.jd1, self.jd2_filled)
out = np.empty(self.jd1.shape, dtype=[('year', 'i4'),
('month', 'i4'),
('day', 'i4'),
('hour', 'i4'),
('minute', 'i4'),
('second', 'f8')])
out['year'] = iys
out['month'] = ims
out['day'] = ids
out['hour'] = ihmsfs['h']
out['minute'] = ihmsfs['m']
out['second'] = ihmsfs['s'] + ihmsfs['f'] * 10**(-9)
out = out.view(np.recarray)
return self.mask_if_needed(out)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0 * u.day, dst=0 * u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity`, optional
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity`, optional
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : str or None, optional
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = 'UTC'
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
**Fast C-based parser**
Time format classes can take advantage of a fast C-based parser if the times
are represented as fixed-format strings with year, month, day-of-month,
hour, minute, second, OR year, day-of-year, hour, minute, second. This can
be a factor of 20 or more faster than the pure Python parser.
Fixed format means that the components always have the same number of
characters. The Python parser will accept ``2001-9-2`` as a date, but the C
parser would require ``2001-09-02``.
A subclass in this case must define a class attribute ``fast_parser_pars``
which is a `dict` with all of the keys below. An inherited attribute is not
checked, only an attribute in the class ``__dict__``.
- ``delims`` (tuple of int): ASCII code for character at corresponding
``starts`` position (0 => no character)
- ``starts`` (tuple of int): position where component starts (including
      delimiter if present). Use -1 for the month component for formats that use
day of year.
- ``stops`` (tuple of int): position where component ends. Use -1 to
continue to end of string, or for the month component for formats that use
day of year.
- ``break_allowed`` (tuple of int): if true (1) then the time string can
legally end just before the corresponding component (e.g. "2000-01-01"
is a valid time but "2000-01-01 12" is not).
- ``has_day_of_year`` (int): 0 if dates have year, month, day; 1 if year,
day-of-year
"""
def __init_subclass__(cls, **kwargs):
if 'fast_parser_pars' in cls.__dict__:
fpp = cls.fast_parser_pars
fpp = np.array(list(zip(map(chr, fpp['delims']),
fpp['starts'],
fpp['stops'],
fpp['break_allowed'])),
_parse_times.dt_pars)
if cls.fast_parser_pars['has_day_of_year']:
fpp['start'][1] = fpp['stop'][1] = -1
cls._fast_parser = _parse_times.create_parser(fpp)
super().__init_subclass__(**kwargs)
def _check_val_type(self, val1, val2):
if val1.dtype.kind not in ('S', 'U') and val1.size:
raise TypeError(f'Input values for {self.name} class must be strings')
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex('.')
except Exception:
fracsec = 0.0
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in zip(components, defaults)]
# Add fractional seconds
vals[-1] = vals[-1] + fracsec
return vals
else:
raise ValueError(f'Time {timestr} does not match {self.name} format')
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2"""
# If specific input subformat is required then use the Python parser.
# Also do this if Time format class does not define `use_fast_parser` or
# if the fast parser is entirely disabled. Note that `use_fast_parser`
# is ignored for format classes that don't have a fast parser.
if (self.in_subfmt != '*'
or '_fast_parser' not in self.__class__.__dict__
or conf.use_fast_parser == 'False'):
jd1, jd2 = self.get_jds_python(val1, val2)
else:
try:
jd1, jd2 = self.get_jds_fast(val1, val2)
except Exception:
# Fall through to the Python parser unless fast is forced.
if conf.use_fast_parser == 'force':
raise
else:
jd1, jd2 = self.get_jds_python(val1, val2)
self.jd1 = jd1
self.jd2 = jd2
def get_jds_python(self, val1, val2):
"""Parse the time strings contained in val1 and get jd1, jd2"""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['zerosize_ok'],
op_dtypes=[None] + 5 * [np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
self.parse_string(val, subfmts))
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
jd1, jd2 = day_frac(jd1, jd2)
return jd1, jd2
def get_jds_fast(self, val1, val2):
"""Use fast C parser to parse time strings in val1 and get jd1, jd2"""
        # Handle bytes or str input and convert to uint8. We need to use the
# dtype _parse_times.dt_u1 instead of uint8, since otherwise it is
# not possible to create a gufunc with structured dtype output.
# See note about ufunc type resolver in pyerfa/erfa/ufunc.c.templ.
if val1.dtype.kind == 'U':
# Note: val1.astype('S') is *very* slow, so we check ourselves
# that the input is pure ASCII.
val1_uint32 = val1.view((np.uint32, val1.dtype.itemsize // 4))
if np.any(val1_uint32 > 127):
raise ValueError('input is not pure ASCII')
# It might be possible to avoid making a copy via astype with
# cleverness in parse_times.c but leave that for another day.
chars = val1_uint32.astype(_parse_times.dt_u1)
else:
chars = val1.view((_parse_times.dt_u1, val1.dtype.itemsize))
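        # Hedged illustration of the byte view above (comment only; np.uint8 is
        # equivalent to the single-byte dtype _parse_times.dt_u1):
        #
        #   >>> v = np.array([b'2000-01-01'], dtype='S10')
        #   >>> v.view((np.uint8, v.dtype.itemsize)).shape
        #   (1, 10)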
# Call the fast parsing ufunc.
time_struct = self._fast_parser(chars)
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
time_struct['year'],
time_struct['month'],
time_struct['day'],
time_struct['hour'],
time_struct['minute'],
time_struct['second'])
return day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
        scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
self.jd1, self.jd2_filled)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
yday = None
has_yday = '{yday:' in str_fmt
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs],
flags=['zerosize_ok']):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {'year': int(iy), 'mon': int(im), 'day': int(id),
'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
'fracsec': int(ifracsec), 'yday': yday}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'iso'
subfmts = (('date_hms',
'%Y-%m-%d %H:%M:%S',
# XXX To Do - use strftime for output ??
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%d %H:%M',
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000-01-12 13:14:15.678"
# 01234567890123456789012
# yyyy-mm-dd hh:mm:ss.fff
# Parsed as ('yyyy', '-mm', '-dd', ' hh', ':mm', ':ss', '.fff')
fast_parser_pars = dict(
delims=(0, ord('-'), ord('-'), ord(' '), ord(':'), ord(':'), ord('.')),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0)
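    # Worked sketch of ``break_allowed`` above: the string may stop just
    # before the hour component, so a bare date parses:
    #
    #   >>> from astropy.time import Time
    #   >>> Time('2000-01-12', format='iso', out_subfmt='date').iso
    #   '2000-01-12'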
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith('Z'):
if self.scale != 'utc':
                raise ValueError("Time input terminating in 'Z' must have "
                                 "scale='utc'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'isot'
subfmts = (('date_hms',
'%Y-%m-%dT%H:%M:%S',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%dT%H:%M',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
# See TimeISO for explanation
fast_parser_pars = dict(
delims=(0, ord('-'), ord('-'), ord('T'), ord(':'), ord(':'), ord('.')),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0)
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'yday'
subfmts = (('date_hms',
'%Y:%j:%H:%M:%S',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y:%j:%H:%M',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
('date',
'%Y:%j',
'{year:d}:{yday:03d}'))
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000:123:13:14:15.678"
# 012345678901234567890
# yyyy:ddd:hh:mm:ss.fff
# Parsed as ('yyyy', ':ddd', ':hh', ':mm', ':ss', '.fff')
#
# delims: character at corresponding `starts` position (0 => no character)
# starts: position where component starts (including delimiter if present)
# stops: position where component ends (-1 => continue to end of string)
fast_parser_pars = dict(
delims=(0, 0, ord(':'), ord(':'), ord(':'), ord(':'), ord('.')),
starts=(0, -1, 4, 8, 11, 14, 17),
stops=(3, -1, 7, 10, 13, 16, -1),
# Break allowed before:
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=1)
class TimeDatetime64(TimeISOT):
name = 'datetime64'
def _check_val_type(self, val1, val2):
if not val1.dtype.kind == 'M':
if val1.size > 0:
raise TypeError('Input values for {} class must be '
'datetime64 objects'.format(self.name))
else:
val1 = np.array([], 'datetime64[D]')
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = '2000'
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']:
val1 = val1.astype('datetime64[D]')
val1 = val1.astype('S')
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype('datetime64')
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
    ISOT but can give signed five-digit year (mostly for negative years).
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = 'fits'
subfmts = (
('date_hms',
(r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date',
r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:04d}-{mon:02d}-{day:02d}'),
('longdate_hms',
(r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('longdate',
r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:+06d}-{mon:02d}-{day:02d}'))
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(subfmt[0],
subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
subfmt[2]) for subfmt in subfmts)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present"""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError(f'Time {timestr} does not match {self.name} format')
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm['scale'] is not None:
warnings.warn("FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm['scale'].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError("Scale {!r} is not in the allowed scales {}"
.format(scale, sorted(TIME_SCALES)))
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError("Input strings for {} class must all "
"have consistent time scales."
.format(self.name))
return [int(tm['year']), int(tm['mon']), int(tm['mday']),
int(tm.get('hour', 0)), int(tm.get('min', 0)),
float(tm.get('sec', 0.))]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if 'long' not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = 'long' + self.out_subfmt
return super().value
class TimeEpochDate(TimeNumeric):
"""
    Base class to support floating-point Besselian and Julian epoch dates.
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
value = jd_to_epoch(self.jd1, self.jd2)
return super().to_value(jd1=value, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0"""
name = 'byear'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if hasattr(val1, 'to') and hasattr(val1, 'unit') and val1.unit is not None:
raise ValueError("Cannot use Quantities for 'byear' format, "
"as the interpretation would be ambiguous. "
"Use float with Besselian year instead. ")
# FIXME: is val2 really okay here?
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0"""
name = 'jyear'
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double],
flags=['zerosize_ok'])
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError(f'Time {val} does not match {self.name} format')
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'"""
name = 'byear_str'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
epoch_prefix = 'B'
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'"""
name = 'jyear_str'
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
epoch_prefix = 'J'
class TimeDeltaFormat(TimeFormat):
"""Base class for time delta representations"""
_registry = TIME_DELTA_FORMATS
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_DELTA_SCALES))
return scale
class TimeDeltaNumeric(TimeDeltaFormat, TimeNumeric):
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1. / self.unit)
def to_value(self, **kwargs):
# Note that 1/unit is always exactly representable, so the
# following multiplications are exact.
factor = 1. / self.unit
jd1 = self.jd1 * factor
jd2 = self.jd2 * factor
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDeltaSec(TimeDeltaNumeric):
"""Time delta in SI seconds"""
name = 'sec'
unit = 1. / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaNumeric):
"""Time delta in Julian days (86400 SI seconds)"""
name = 'jd'
unit = 1.
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta"""
name = 'datetime'
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime.timedelta objects'.format(self.name))
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer([val1, None, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None, np.double, np.double])
day = datetime.timedelta(days=1)
for val, jd1, jd2 in iterator:
jd1[...], other = divmod(val.item(), day)
jd2[...] = other / day
self.jd1, self.jd2 = day_frac(iterator.operands[-2],
iterator.operands[-1])
@property
def value(self):
iterator = np.nditer([self.jd1, self.jd2, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None, None, object])
for jd1, jd2, out in iterator:
jd1_, jd2_ = day_frac(jd1, jd2)
out[...] = datetime.timedelta(days=jd1_,
microseconds=jd2_ * 86400 * 1e6)
return self.mask_if_needed(iterator.operands[-1])
def _validate_jd_for_storage(jd):
if isinstance(jd, (float, int)):
return np.array(jd, dtype=np.float_)
if (isinstance(jd, np.generic)
and (jd.dtype.kind == 'f' and jd.dtype.itemsize <= 8
or jd.dtype.kind in 'iu')):
return np.array(jd, dtype=np.float_)
elif (isinstance(jd, np.ndarray)
and jd.dtype.kind == 'f'
and jd.dtype.itemsize == 8):
return jd
else:
raise TypeError(
f"JD values must be arrays (possibly zero-dimensional) "
f"of floats but we got {jd!r} of type {type(jd)}")
def _broadcast_writeable(jd1, jd2):
if jd1.shape == jd2.shape:
return jd1, jd2
# When using broadcast_arrays, *both* are flagged with
# warn-on-write, even the one that wasn't modified, and
# require "C" only clears the flag if it actually copied
# anything.
shape = np.broadcast(jd1, jd2).shape
if jd1.shape == shape:
s_jd1 = jd1
else:
s_jd1 = np.require(np.broadcast_to(jd1, shape),
requirements=["C", "W"])
if jd2.shape == shape:
s_jd2 = jd2
else:
s_jd2 = np.require(np.broadcast_to(jd2, shape),
requirements=["C", "W"])
return s_jd1, s_jd2
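# Illustrative sketch of the helper above: the scalar side is broadcast to a
# writeable C-contiguous array while the already-matching side is untouched:
#
#   >>> a, b = _broadcast_writeable(np.zeros(3), np.array(0.5))
#   >>> b.shape, b.flags.writeable
#   ((3,), True)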
# Import symbols from core.py that are used in this module. This succeeds
# because __init__.py imports format.py just before core.py.
from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError # noqa
|
33528510d9720812eb7336678db328a5d9033499494aaf092f9737d7265fb448 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['quantity_input']
import inspect
from collections.abc import Sequence
from functools import wraps
from numbers import Number
import numpy as np
from . import _typing as T
from .core import Unit, UnitBase, UnitsError, add_enabled_equivalencies, dimensionless_unscaled
from .function.core import FunctionUnitBase
from .physical import PhysicalType, get_physical_type
from .quantity import Quantity
from .structured import StructuredUnit
NoneType = type(None)
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
unit = get_physical_type(target)._unit
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise ValueError(f"Invalid unit or physical type {target!r}.") from None
allowed_units.append(unit)
return allowed_units
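# Illustrative sketch: unit strings and physical-type names both resolve to
# Unit objects here (assumes the 'length' physical type maps to meters):
#
#   >>> _get_allowed_units(['m', 'length'])
#   [Unit("m"), Unit("m")]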
def _validate_arg_value(param_name, func_name, arg, targets, equivalencies,
strict_dimensionless=False):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
# If dimensionless is an allowed unit and the argument is unit-less,
# allow numbers or numpy arrays with numeric dtypes
if (dimensionless_unscaled in allowed_units and not strict_dimensionless
and not hasattr(arg, "unit")):
if isinstance(arg, Number):
return
elif (isinstance(arg, np.ndarray)
and np.issubdtype(arg.dtype, np.number)):
return
for allowed_unit in allowed_units:
try:
is_equivalent = arg.unit.is_equivalent(allowed_unit,
equivalencies=equivalencies)
if is_equivalent:
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = ("a 'unit' attribute without an 'is_equivalent' method")
else:
error_msg = "no 'unit' attribute"
raise TypeError(f"Argument '{param_name}' to function '{func_name}'"
f" has {error_msg}. You should pass in an astropy "
"Quantity instead.")
else:
error_msg = (f"Argument '{param_name}' to function '{func_name}' must "
"be in units convertible to")
if len(targets) > 1:
targ_names = ", ".join([f"'{str(targ)}'" for targ in targets])
raise UnitsError(f"{error_msg} one of: {targ_names}.")
else:
raise UnitsError(f"{error_msg} '{str(targets[0])}'.")
def _parse_annotation(target):
if target in (None, NoneType, inspect._empty):
return target
# check if unit-like
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
ptype = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
if isinstance(target, str):
raise ValueError(f"invalid unit or physical type {target!r}.") from None
else:
return ptype
else:
return unit
# could be a type hint
origin = T.get_origin(target)
if origin is T.Union:
return [_parse_annotation(t) for t in T.get_args(target)]
elif origin is not T.Annotated: # can't be Quantity[]
return False
# parse type hint
cls, *annotations = T.get_args(target)
if not issubclass(cls, Quantity) or not annotations:
return False
# get unit from type hint
unit, *rest = annotations
if not isinstance(unit, (UnitBase, PhysicalType)):
return False
return unit
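# Illustrative sketch of the annotation parsing above (assumes astropy's
# ``Quantity[...]`` annotated-typing support):
#
#   >>> import astropy.units as u
#   >>> _parse_annotation(u.Quantity[u.m])
#   Unit("m")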
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the
decorator, or by using function annotation syntax. Arguments to the
decorator take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator or
in the annotation. If the argument has no unit attribute, i.e. it is not
        a Quantity object, a `TypeError` will be raised unless the argument is
        an annotation. This is to allow non-Quantity annotations to pass
through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
        The original function is accessible via the attribute ``__wrapped__``.
See :func:`functools.wraps` for details.
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Or using a unit-aware Quantity annotation.
.. code-block:: python
@u.quantity_input
def myfunction(myangle: u.Quantity[u.arcsec]):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, strict_dimensionless=False, **kwargs):
self.equivalencies = kwargs.pop('equivalencies', [])
self.decorator_kwargs = kwargs
self.strict_dimensionless = strict_dimensionless
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL):
continue
# Catch the (never triggered) case where bind relied on a default value.
if (param.name not in bound_args.arguments
and param.default is not param.empty):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# parses to unit if it's an annotation (or list thereof)
targets = _parse_annotation(targets)
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
                # Here, we check whether multiple target units/physical types
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if (isinstance(targets, str)
or not isinstance(targets, Sequence)):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets or NoneType in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [t for t in valid_targets
if isinstance(t, (str, UnitBase, PhysicalType))]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(param.name, wrapped_function.__name__,
arg, valid_targets, self.equivalencies,
self.strict_dimensionless)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
# Return
ra = wrapped_signature.return_annotation
valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn)
if ra not in valid_empty:
target = (ra if T.get_origin(ra) not in (T.Annotated, T.Union)
else _parse_annotation(ra))
if isinstance(target, str) or not isinstance(target, Sequence):
target = [target]
valid_targets = [t for t in target
if isinstance(t, (str, UnitBase, PhysicalType))]
_validate_arg_value("return", wrapped_function.__name__,
return_, valid_targets, self.equivalencies,
self.strict_dimensionless)
if len(valid_targets) > 0:
return_ <<= valid_targets[0]
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
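# Minimal usage sketch, mirroring the docstring examples above:
#
#   >>> import astropy.units as u
#   >>> @u.quantity_input
#   ... def to_area(length: u.m) -> u.m**2:
#   ...     return length * length
#   >>> to_area(3 * u.cm)
#   <Quantity 0.0009 m2>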
|
cbe40d5345b69ac80bbc66977f447ecbd1cd4efc98a89d7adefda0e432cd06f4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines colloquially used Imperial units. They are
available in the `astropy.units.imperial` namespace, but not in the
top-level `astropy.units` namespace, e.g.::
>>> import astropy.units as u
>>> mph = u.imperial.mile / u.hour
>>> mph
Unit("mi / h")
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> import astropy.units as u
>>> u.imperial.enable() # doctest: +SKIP
"""
from . import si
from .core import UnitBase, def_unit
_ns = globals()
###########################################################################
# LENGTH
def_unit(['inch'], 2.54 * si.cm, namespace=_ns,
doc="International inch")
def_unit(['ft', 'foot'], 12 * inch, namespace=_ns,
doc="International foot")
def_unit(['yd', 'yard'], 3 * ft, namespace=_ns,
doc="International yard")
def_unit(['mi', 'mile'], 5280 * ft, namespace=_ns,
doc="International mile")
def_unit(['mil', 'thou'], 0.001 * inch, namespace=_ns,
doc="Thousandth of an inch")
def_unit(['nmi', 'nauticalmile', 'NM'], 1852 * si.m, namespace=_ns,
doc="Nautical mile")
def_unit(['fur', 'furlong'], 660 * ft, namespace=_ns,
doc="Furlong")
###########################################################################
# AREAS
def_unit(['ac', 'acre'], 43560 * ft ** 2, namespace=_ns,
doc="International acre")
###########################################################################
# VOLUMES
def_unit(['gallon'], si.liter / 0.264172052, namespace=_ns,
doc="U.S. liquid gallon")
def_unit(['quart'], gallon / 4, namespace=_ns,
doc="U.S. liquid quart")
def_unit(['pint'], quart / 2, namespace=_ns,
doc="U.S. liquid pint")
def_unit(['cup'], pint / 2, namespace=_ns,
doc="U.S. customary cup")
def_unit(['foz', 'fluid_oz', 'fluid_ounce'], cup / 8, namespace=_ns,
doc="U.S. fluid ounce")
def_unit(['tbsp', 'tablespoon'], foz / 2, namespace=_ns,
doc="U.S. customary tablespoon")
def_unit(['tsp', 'teaspoon'], tbsp / 3, namespace=_ns,
doc="U.S. customary teaspoon")
###########################################################################
# MASS
def_unit(['oz', 'ounce'], 28.349523125 * si.g, namespace=_ns,
doc="International avoirdupois ounce: mass")
def_unit(['lb', 'lbm', 'pound'], 16 * oz, namespace=_ns,
doc="International avoirdupois pound: mass")
def_unit(['st', 'stone'], 14 * lb, namespace=_ns,
doc="International avoirdupois stone: mass")
def_unit(['ton'], 2000 * lb, namespace=_ns,
doc="International avoirdupois ton: mass")
def_unit(['slug'], 32.174049 * lb, namespace=_ns,
doc="slug: mass")
###########################################################################
# SPEED
def_unit(['kn', 'kt', 'knot', 'NMPH'], nmi / si.h, namespace=_ns,
doc="nautical unit of speed: 1 nmi per hour")
###########################################################################
# FORCE
def_unit('lbf', slug * ft * si.s**-2, namespace=_ns,
doc="Pound: force")
def_unit(['kip', 'kilopound'], 1000 * lbf, namespace=_ns,
doc="Kilopound: force")
##########################################################################
# ENERGY
def_unit(['BTU', 'btu'], 1.05505585 * si.kJ, namespace=_ns,
doc="British thermal unit")
def_unit(['cal', 'calorie'], 4.184 * si.J, namespace=_ns,
doc="Thermochemical calorie: pre-SI metric unit of energy")
def_unit(['kcal', 'Cal', 'Calorie', 'kilocal', 'kilocalorie'],
1000 * cal, namespace=_ns,
doc="Calorie: colloquial definition of Calorie")
##########################################################################
# PRESSURE
def_unit('psi', lbf * inch ** -2, namespace=_ns,
doc="Pound per square inch: pressure")
###########################################################################
# POWER
# Imperial units
def_unit(['hp', 'horsepower'], si.W / 0.00134102209, namespace=_ns,
         doc="Mechanical horsepower")
###########################################################################
# TEMPERATURE
def_unit(['deg_F', 'Fahrenheit'], namespace=_ns, doc='Degrees Fahrenheit',
format={'latex': r'{}^{\circ}F', 'unicode': '°F'})
def_unit(['deg_R', 'Rankine'], namespace=_ns, doc='Rankine scale: absolute scale of thermodynamic temperature')
###########################################################################
# CLEANUP
del UnitBase
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable Imperial units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`.
This may be used with the ``with`` statement to enable Imperial
units only temporarily.
"""
    # Local imports to avoid a cyclical import and to avoid polluting the
    # module namespace.
import inspect
from .core import add_enabled_units
return add_enabled_units(inspect.getmodule(enable))
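# Illustrative sketch of temporary enabling (per the docstring above; the
# context manager returned by add_enabled_units restores the prior state):
#
#   >>> import astropy.units as u
#   >>> with u.imperial.enable():
#   ...     equivalents = u.m.find_equivalent_units()  # now includes ft, mi, ...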
|
2458707807fb6adcfad398f526064825f63b40f5b4e7e5c954727affe1efe7ed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units used in the CDS format, both the units
defined in `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical Catalogues 2.0
<http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_ format and the `complete
set of supported units <https://vizier.u-strasbg.fr/viz-bin/Unit>`_.
This format is used by VOTable up to version 1.2.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.cds`
module::
>>> from astropy.units import cds
>>> q = 10. * cds.lyr # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import cds
>>> cds.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
"""Initialize CDS units module."""
# Local imports to avoid polluting top-level namespace
import numpy as np
from astropy import units as u
from astropy.constants import si as _si
from . import core
# The CDS format also supports power-of-2 prefixes as defined here:
# http://physics.nist.gov/cuu/Units/binary.html
prefixes = core.si_prefixes + core.binary_prefixes
# CDS only uses the short prefixes
prefixes = [(short, short, factor) for (short, long, factor) in prefixes]
# The following units are defined in alphabetical order, directly from
# here: https://vizier.u-strasbg.fr/viz-bin/Unit
mapping = [
(['A'], u.A, "Ampere"),
(['a'], u.a, "year", ['P']),
(['a0'], _si.a0, "Bohr radius"),
(['al'], u.lyr, "Light year", ['c', 'd']),
(['lyr'], u.lyr, "Light year"),
(['alpha'], _si.alpha, "Fine structure constant"),
((['AA', 'Å'], ['Angstrom', 'Angstroem']), u.AA, "Angstrom"),
(['arcmin', 'arcm'], u.arcminute, "minute of arc"),
(['arcsec', 'arcs'], u.arcsecond, "second of arc"),
(['atm'], _si.atm, "atmosphere"),
(['AU', 'au'], u.au, "astronomical unit"),
(['bar'], u.bar, "bar"),
(['barn'], u.barn, "barn"),
(['bit'], u.bit, "bit"),
(['byte'], u.byte, "byte"),
(['C'], u.C, "Coulomb"),
(['c'], _si.c, "speed of light", ['p']),
(['cal'], 4.1854 * u.J, "calorie"),
(['cd'], u.cd, "candela"),
(['ct'], u.ct, "count"),
(['D'], u.D, "Debye (dipole)"),
(['d'], u.d, "Julian day", ['c']),
((['deg', '°'], ['degree']), u.degree, "degree"),
(['dyn'], u.dyn, "dyne"),
(['e'], _si.e, "electron charge", ['m']),
(['eps0'], _si.eps0, "electric constant"),
(['erg'], u.erg, "erg"),
(['eV'], u.eV, "electron volt"),
(['F'], u.F, "Farad"),
(['G'], _si.G, "Gravitation constant"),
(['g'], u.g, "gram"),
(['gauss'], u.G, "Gauss"),
(['geoMass', 'Mgeo'], u.M_earth, "Earth mass"),
(['H'], u.H, "Henry"),
(['h'], u.h, "hour", ['p']),
(['hr'], u.h, "hour"),
(['\\h'], _si.h, "Planck constant"),
(['Hz'], u.Hz, "Hertz"),
(['inch'], 0.0254 * u.m, "inch"),
(['J'], u.J, "Joule"),
(['JD'], u.d, "Julian day", ['M']),
(['jovMass', 'Mjup'], u.M_jup, "Jupiter mass"),
(['Jy'], u.Jy, "Jansky"),
(['K'], u.K, "Kelvin"),
(['k'], _si.k_B, "Boltzmann"),
(['l'], u.l, "litre", ['a']),
(['lm'], u.lm, "lumen"),
(['Lsun', 'solLum'], u.solLum, "solar luminosity"),
(['lx'], u.lx, "lux"),
(['m'], u.m, "meter"),
(['mag'], u.mag, "magnitude"),
(['me'], _si.m_e, "electron mass"),
(['min'], u.minute, "minute"),
(['MJD'], u.d, "Julian day"),
(['mmHg'], 133.322387415 * u.Pa, "millimeter of mercury"),
(['mol'], u.mol, "mole"),
(['mp'], _si.m_p, "proton mass"),
(['Msun', 'solMass'], u.solMass, "solar mass"),
((['mu0', 'µ0'], []), _si.mu0, "magnetic constant"),
(['muB'], _si.muB, "Bohr magneton"),
(['N'], u.N, "Newton"),
(['Ohm'], u.Ohm, "Ohm"),
(['Pa'], u.Pa, "Pascal"),
(['pc'], u.pc, "parsec"),
(['ph'], u.ph, "photon"),
(['pi'], u.Unit(np.pi), "π"),
(['pix'], u.pix, "pixel"),
(['ppm'], u.Unit(1e-6), "parts per million"),
(['R'], _si.R, "gas constant"),
(['rad'], u.radian, "radian"),
(['Rgeo'], _si.R_earth, "Earth equatorial radius"),
(['Rjup'], _si.R_jup, "Jupiter equatorial radius"),
(['Rsun', 'solRad'], u.solRad, "solar radius"),
(['Ry'], u.Ry, "Rydberg"),
(['S'], u.S, "Siemens"),
(['s', 'sec'], u.s, "second"),
(['sr'], u.sr, "steradian"),
(['Sun'], u.Sun, "solar unit"),
(['T'], u.T, "Tesla"),
(['t'], 1e3 * u.kg, "metric tonne", ['c']),
(['u'], _si.u, "atomic mass", ['da', 'a']),
(['V'], u.V, "Volt"),
(['W'], u.W, "Watt"),
(['Wb'], u.Wb, "Weber"),
(['yr'], u.a, "year"),
]
for entry in mapping:
if len(entry) == 3:
names, unit, doc = entry
excludes = []
else:
names, unit, doc, excludes = entry
core.def_unit(names, unit, prefixes=prefixes, namespace=_ns, doc=doc,
exclude_prefixes=excludes)
core.def_unit(['µas'], u.microarcsecond,
doc="microsecond of arc", namespace=_ns)
core.def_unit(['mas'], u.milliarcsecond,
doc="millisecond of arc", namespace=_ns)
core.def_unit(['---', '-'], u.dimensionless_unscaled,
doc="dimensionless and unscaled", namespace=_ns)
core.def_unit(['%'], u.percent,
doc="percent", namespace=_ns)
# The Vizier "standard" defines this in units of "kg s-3", but
# that may not make a whole lot of sense, so here we just define
# it as its own new disconnected unit.
core.def_unit(['Crab'], prefixes=prefixes, namespace=_ns,
doc="Crab (X-ray) flux")
_initialize_module()
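# Illustrative note (not executed): because the definitions above are created
# with ``prefixes=prefixes``, prefixed forms are generated as well, e.g.:
#
#     >>> from astropy.units import cds
#     >>> cds.kpc, cds.mJy  # doctest: +SKIP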
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable CDS units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`. This will disable
all of the "default" `astropy.units` units, since there
are some namespace clashes between the two.
This may be used with the ``with`` statement to enable CDS
units only temporarily.
"""
# Local imports to avoid cyclical import and polluting namespace
import inspect
from .core import set_enabled_units
return set_enabled_units(inspect.getmodule(enable))
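# Illustrative usage sketch (not executed). Note that, unlike
# ``imperial.enable()``, this *replaces* the default registry rather than
# adding to it:
#
#     >>> from astropy import units as u
#     >>> from astropy.units import cds
#     >>> with cds.enable():  # doctest: +SKIP
#     ...     u.Unit('Crab')  # resolvable only while CDS units are enabled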
|
c9f4dba9d73b1f525c468ffede1e00c68adf2706f6fbc7c8cee169072895d1b6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines magnitude zero points and related photometric quantities.
The corresponding magnitudes are given in the description of each unit
(the actual definitions are in `~astropy.units.function.logarithmic`).
"""
import numpy as _numpy
from astropy.constants import si as _si
from . import astrophys, cgs, si
from .core import Unit, UnitBase, def_unit
_ns = globals()
def_unit(['Bol', 'L_bol'], _si.L_bol0, namespace=_ns, prefixes=False,
doc="Luminosity corresponding to absolute bolometric magnitude zero "
"(magnitude ``M_bol``).")
def_unit(['bol', 'f_bol'], _si.L_bol0 / (4 * _numpy.pi * (10.*astrophys.pc)**2),
namespace=_ns, prefixes=False, doc="Irradiance corresponding to "
"appparent bolometric magnitude zero (magnitude ``m_bol``).")
def_unit(['AB', 'ABflux'], 10.**(48.6/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.Hz,
namespace=_ns, prefixes=False,
doc="AB magnitude zero flux density (magnitude ``ABmag``).")
def_unit(['ST', 'STflux'], 10.**(21.1/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.AA,
namespace=_ns, prefixes=False,
doc="ST magnitude zero flux density (magnitude ``STmag``).")
def_unit(['mgy', 'maggy'],
namespace=_ns, prefixes=[(['n'], ['nano'], 1e-9)],
doc="Maggies - a linear flux unit that is the flux for a mag=0 object."
"To tie this onto a specific calibrated unit system, the "
"zero_point_flux equivalency should be used.")
def zero_point_flux(flux0):
"""
    An equivalency for converting linear flux units ("maggies") defined relative
to a standard source into a standardized system.
Parameters
----------
flux0 : `~astropy.units.Quantity`
The flux of a magnitude-0 object in the "maggy" system.
"""
flux_unit0 = Unit(flux0)
return [(maggy, flux_unit0)]
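# Illustrative usage sketch (not executed; 3631 Jy is an assumed calibration
# chosen to match the AB system):
#
#     >>> from astropy import units as u
#     >>> (1 * u.mgy).to(u.Jy, zero_point_flux(3631 * u.Jy))  # doctest: +SKIP
#     <Quantity 3631. Jy>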
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del cgs, si, astrophys
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
f88605d597c02562db1248d829743eaa7c1a705b0271139cf8f4f83cbc4405cb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from . import format as unit_format
from .utils import is_effectively_unity, resolve_fractions, sanitize_scale, validate_power
__all__ = [
'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError',
'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit',
'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry',
'set_enabled_units', 'add_enabled_units',
'set_enabled_equivalencies', 'add_enabled_equivalencies',
'set_enabled_aliases', 'add_enabled_aliases',
'dimensionless_unscaled', 'one',
]
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(
f"Invalid equivalence entry {i}: {equiv!r}")
if not (funit is Unit(funit) and
(tunit is None or tunit is Unit(tunit)) and
callable(a) and
callable(b)):
raise ValueError(
f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((funit, tunit, a, b))
return normalized
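# Illustrative sketch of the three accepted tuple shapes (not executed; the
# units and lambda functions are arbitrary examples):
#
#     (u.pc, u.lyr)                  # 2-tuple: identity functions are assumed
#     (u.s, u.Hz, lambda x: 1 / x)   # 3-tuple: the function is its own inverse
#     (u.K, u.deg_C,
#      lambda x: x - 273.15,
#      lambda x: x + 273.15)         # 4-tuple: explicit forward and backward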
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[], aliases={}):
if isinstance(init, _UnitRegistry):
            # If passed another registry we don't need to rebuild everything,
            # but because these are mutable types we don't want to create
            # conflicts, so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._aliases = init._aliases.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
            # _by_physical_type is a dictionary with sets as values; each
            # of these sets must be copied, otherwise we could alter the
            # old registry.
self._by_physical_type = {k: v.copy() for k, v in
init._by_physical_type.items()}
else:
self._reset_units()
self._reset_equivalencies()
self._reset_aliases()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
self.add_enabled_aliases(aliases)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
def _reset_aliases(self):
self._aliases = {}
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if (st in self._registry and unit != self._registry[st]):
raise ValueError(
"Object with name {!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them.".format(st))
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
            physical_type_id = unit._get_physical_type_id()
            self._by_physical_type.setdefault(
                physical_type_id, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
@property
def aliases(self):
return self._aliases
def set_enabled_aliases(self, aliases):
"""
Set aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
self._reset_aliases()
self.add_enabled_aliases(aliases)
def add_enabled_aliases(self, aliases):
"""
Add aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
for alias, unit in aliases.items():
if alias in self._registry and unit != self._registry[alias]:
raise ValueError(
f"{alias} already means {self._registry[alias]}, so "
f"cannot be used as an alias for {unit}.")
if alias in self._aliases and unit != self._aliases[alias]:
raise ValueError(
f"{alias} already is an alias for {self._aliases[alias]}, so "
f"cannot be used as an alias for {unit}.")
for alias, unit in aliases.items():
if alias not in self._registry and alias not in self._aliases:
self._aliases[alias] = unit
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(
_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(
equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
    >>> import numpy as np
    >>> from astropy import units as u
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +FLOAT_CMP
<Quantity -1.+1.2246468e-16j>
"""
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
def set_enabled_aliases(aliases):
"""
Set aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, set the aliases requested
get_current_unit_registry().set_enabled_aliases(aliases)
return context
def add_enabled_aliases(aliases):
"""
Add aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Since no aliases are enabled by default, generally it is recommended
to use `set_enabled_aliases`.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, add the further aliases requested
get_current_unit_registry().add_enabled_aliases(aliases)
return context
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
Used to catch the errors involving scaled units,
which are not recognized by FITS format.
"""
pass
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
    E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
    instance were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase:
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
_hash = None
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self).encode('unicode_escape')
def __str__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return f'Unit("{string}")'
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
unit = self.decompose()
r = zip([x.name for x in unit.bases], unit.powers)
# bases and powers are already sorted in a unique way
# r.sort()
r = tuple(r)
return r
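    # Illustrative sketch of the identifier returned above (not executed):
    # for Newtons, which decompose to kg m / s2, it is a tuple along the
    # lines of
    #
    #     (('kg', 1), ('m', 1), ('s', -2))
    #
    # so two units share an identifier exactly when they are dimensionally
    # equal, regardless of scale.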
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. "
"Perhaps you meant to_string()?")
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic):
"""
Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
f = unit_format.get_format(format)
return f.to_string(self)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
@staticmethod
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or None
Returns
-------
A normalized list, including possible global defaults set by, e.g.,
        `set_enabled_equivalencies`, except when ``equivalencies=None``,
in which case the returned list is always empty.
Raises
------
ValueError if an equivalency cannot be interpreted
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __truediv__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rtruediv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
            # so we make it into one, fast-tracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self**(-1))
except TypeError:
return NotImplemented
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, unit=self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
        # so we make it into one, fast-tracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, unit=self)
except TypeError:
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __rrshift__(self, m):
warnings.warn(">> is not implemented. Did you mean to convert "
"to a Quantity with unit {} using '<<'?".format(self),
AstropyWarning)
return NotImplemented
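    # Illustrative sketch of the ``<<`` operator handled above (not executed):
    #
    #     >>> import numpy as np
    #     >>> from astropy import units as u
    #     >>> np.arange(3.) << u.m  # doctest: +SKIP
    #     <Quantity [0., 1., 2.] m>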
def __hash__(self):
if self._hash is None:
parts = ([str(self.scale)] +
[x.name for x in self.bases] +
[str(x) for x in self.powers])
self._hash = hash(tuple(parts))
return self._hash
def __getstate__(self):
# If we get pickled, we should *not* store the memoized hash since
# hashes of strings vary between sessions.
state = self.__dict__.copy()
state.pop('_hash', None)
return state
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1. or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1. or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, str, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other = Unit(other, parse_strict='silent')
return self._is_equivalent(other, equivalencies)
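    # Illustrative sketch of `is_equivalent` above (not executed;
    # ``u.spectral()`` is one of the standard equivalencies):
    #
    #     >>> from astropy import units as u
    #     >>> u.m.is_equivalent(u.pc)                # doctest: +SKIP
    #     True
    #     >>> u.m.is_equivalent(u.Hz)                # doctest: +SKIP
    #     False
    #     >>> u.m.is_equivalent(u.Hz, u.spectral())  # doctest: +SKIP
    #     True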
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if (self._get_physical_type_id() ==
other._get_physical_type_id()):
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other/unit).decompose([a])
return True
except Exception:
pass
else:
                    if (a._is_equivalent(unit) and b._is_equivalent(other) or
                        b._is_equivalent(unit) and a._is_equivalent(other)):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
try:
ratio_in_funit = (other.decompose() /
unit.decompose()).decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string('unscaled')
physical_type = unit.physical_type
if physical_type != 'unknown':
unit_str = f"'{unit_str}' ({physical_type})"
else:
unit_str = f"'{unit_str}'"
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(
f"{unit_str} and {other_str} are not convertible")
def _get_converter(self, other, equivalencies=[]):
"""Get a converter for values in ``self`` to ``other``.
If no conversion is necessary, returns ``unit_scale_converter``
(which is used as a check in quantity helpers).
"""
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
if scale == 1.:
return unit_scale_converter
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies))
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, 'equivalencies'):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
return lambda v: b(self._get_converter(
tunit, equivalencies=equivalencies)(v))
except Exception:
pass
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
            if (self_decomposed.powers == other_decomposed.powers and
                all(self_base is other_base for (self_base, other_base)
                    in zip(self_decomposed.bases, other_decomposed.bases))):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(
f"'{self!r}' is not a scaled version of '{other!r}'")
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit-like
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(Unit(other),
equivalencies=equivalencies)(value)
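    # Illustrative sketch of `to` above (not executed):
    #
    #     >>> from astropy import units as u
    #     >>> u.km.to(u.m)            # doctest: +SKIP
    #     1000.0
    #     >>> u.km.to(u.m, [1., 2.])  # doctest: +SKIP
    #     array([1000., 2000.])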
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(
other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
unit : `~astropy.units.CompositeUnit`
New object containing only irreducible unit objects.
"""
raise NotImplementedError()
def _compose(self, equivalencies=[], namespace=[], max_depth=2, depth=0,
cached_results=None):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
for base in unit.bases:
if base not in namespace:
return False
return True
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [{unit}, set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit ** power
tunit_decomposed = tunit_decomposed ** power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append(
(len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth, depth=depth + 1,
cached_results=cached_results)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append(
(len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
f"Cannot represent unit {self} in terms of the given units")
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(self, equivalencies=[], units=None, max_depth=2,
include_prefix_units=None):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of `~astropy.units.Unit`, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (isinstance(tunit, UnitBase) and
(include_prefix_units or
not isinstance(tunit, PrefixUnit)) and
has_bases_in_common_with_equiv(
decomposed, tunit.decompose())):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(
equivalencies=equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(self._compose(
equivalencies=equivalencies, namespace=units,
max_depth=max_depth, depth=0, cached_results={}))
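    # Illustrative sketch of `compose` above (not executed; the exact list
    # and its ordering depend on which units are enabled):
    #
    #     >>> from astropy import units as u
    #     >>> (u.kg * u.m / u.s**2).compose()  # doctest: +SKIP
    #     [Unit("N"), ...]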
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
"""
bases = set(system.bases)
def score(compose):
            # If compose has no bases, return np.inf as the score. The
            # exact value does not really matter; this case occurs, for
            # instance, for dimensionless quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
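    # Illustrative sketch of `to_system` above (not executed; 1 erg / s
    # is 1e-7 W):
    #
    #     >>> from astropy import units as u
    #     >>> (u.erg / u.s).to_system(u.si)[0]  # doctest: +SKIP
    #     Unit("1e-07 W")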
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
@property
def physical_type(self):
"""
Physical type(s) dimensionally compatible with the unit.
Returns
-------
`~astropy.units.physical.PhysicalType`
A representation of the physical type(s) of a unit.
Examples
--------
>>> from astropy import units as u
>>> u.m.physical_type
PhysicalType('length')
>>> (u.m ** 2 / u.s).physical_type
PhysicalType({'diffusivity', 'kinematic viscosity'})
Physical types can be compared to other physical types
(recommended in packages) or to strings.
>>> area = (u.m ** 2).physical_type
>>> area == u.m.physical_type ** 2
True
>>> area == "area"
True
`~astropy.units.physical.PhysicalType` objects can be used for
dimensional analysis.
>>> number_density = u.m.physical_type ** -3
>>> velocity = (u.m / u.s).physical_type
>>> number_density * velocity
PhysicalType('particle flux')
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also pull options from.
See :ref:`astropy:unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(
unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(
unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
HEADING_NAMES = ('Primary name', 'Unit definition', 'Aliases')
ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant
NO_EQUIV_UNITS_MSG = 'There are no equivalent units'
def __repr__(self):
if len(self) == 0:
return self.NO_EQUIV_UNITS_MSG
else:
lines = self._process_equivalent_units(self)
lines.insert(0, self.HEADING_NAMES)
widths = [0] * self.ROW_LEN
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = (lines[0:1] +
['['] +
[f'{x} ,' for x in lines[1:]] +
[']'])
return '\n'.join(lines)
def _repr_html_(self):
"""
Outputs a HTML table representation within Jupyter notebooks.
"""
if len(self) == 0:
return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>"
else:
# HTML tags to use to compose the table in HTML
blank_table = '<table style="width:50%">{}</table>'
blank_row_container = "<tr>{}</tr>"
heading_row_content = "<th>{}</th>" * self.ROW_LEN
data_row_content = "<td>{}</td>" * self.ROW_LEN
# The HTML will be rendered & the table is simple, so don't
# bother to include newlines & indentation for the HTML code.
heading_row = blank_row_container.format(
heading_row_content.format(*self.HEADING_NAMES))
data_rows = self._process_equivalent_units(self)
all_rows = heading_row
for row in data_rows:
html_row = blank_row_container.format(
data_row_content.format(*row))
all_rows += html_row
return blank_table.format(all_rows)
@staticmethod
def _process_equivalent_units(equiv_units_data):
"""
Extract attributes, and sort, the equivalent units pre-formatting.
"""
processed_equiv_units = []
for u in equiv_units_data:
irred = u.decompose().to_string()
if irred == u.name:
irred = 'irreducible'
processed_equiv_units.append(
(u.name, irred, ', '.join(u.aliases)))
processed_equiv_units.sort()
return processed_equiv_units
def find_equivalent_units(self, equivalencies=[], units=None,
include_prefix_units=False):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
        that may be in effect (as set by `set_enabled_equivalencies`).
units : set of `~astropy.units.Unit`, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
A list of unit objects that match ``u``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
"""
results = self.compose(
equivalencies=equivalencies, units=units, max_depth=1,
include_prefix_units=include_prefix_units)
results = {x.bases[0] for x in results if len(x.bases) == 1}
return self.EquivalentUnitsList(results)
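    # Illustrative sketch of `find_equivalent_units` above (not executed;
    # the listing is abbreviated):
    #
    #     >>> from astropy import units as u
    #     >>> u.s.find_equivalent_units()  # doctest: +SKIP
    #     Primary name | Unit definition | Aliases
    #     [
    #       ...
    #     ]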
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError(
"st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return "{1} ({0})".format(*names[:2])
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
"Object with name {!r} already exists in "
"given namespace ({!r}).".format(
name, namespace[name]))
for name in self._names:
namespace[name] = self
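# Illustrative sketch of the name specifications the NamedUnit class above
# accepts (not executed; in practice these usually arrive via ``def_unit``):
#
#     'm'                                   # a single short name
#     ['m', 'meter']                        # short name followed by aliases
#     (['AA'], ['Angstrom', 'Angstroem'])   # (short names, long names)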
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
    only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__getstate__())
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1],
_error_check=False)
raise UnitConversionError(
f"Unit {self} can not be decomposed into the requested bases")
return self
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return f"UnrecognizedUnit({str(self)})"
def __bytes__(self):
return self.name.encode('ascii', 'replace')
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
"The unit {!r} is unrecognized, so all arithmetic operations "
"with it are invalid.".format(self.name))
__pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = __lt__ = \
__gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator
def __eq__(self, other):
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
"The unit {!r} is unrecognized. It can not be converted "
"to other units.".format(self.name))
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
class _UnitMetaClass(type):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(self, s="", represents=None, format=None, namespace=None,
doc=None, parse_strict='raise'):
# Short-circuit if we're already a unit
if hasattr(s, '_get_physical_type_id'):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
represents = CompositeUnit(represents.value *
represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers,
_error_check=False)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers,
_error_check=False)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc)
# or interpret a Quantity (now became unit), string or number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode('ascii')
try:
return f.parse(s)
except NotImplementedError:
raise
except Exception as e:
if parse_strict == 'silent':
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + ' '
else:
format_clause = ''
msg = ("'{}' did not parse as {}unit: {} "
"If this is meant to be a custom unit, "
"define it with 'u.def_unit'. To have it "
"recognized inside a file reader or other code, "
"enable it with 'u.add_enabled_units'. "
"For details, see "
"https://docs.astropy.org/en/latest/units/combining_and_defining.html"
.format(s, format_clause, str(e)))
if parse_strict == 'raise':
raise ValueError(msg)
elif parse_strict == 'warn':
warnings.warn(msg, UnitsWarning)
else:
raise ValueError("'parse_strict' must be 'warn', "
"'raise' or 'silent'")
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [], _error_check=False)
elif isinstance(s, tuple):
from .structured import StructuredUnit
return StructuredUnit(s)
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError(f"{s} can not be converted to a Unit")
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
There are a number of different ways to construct a Unit, but
the constructor always returns a `UnitBase` instance. If the arguments refer to
an already-existing unit, that existing unit instance is returned,
rather than a new one.
- From a string::
Unit(s, format=None, parse_strict='raise')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From no arguments::
Unit()
Returns the dimensionless unit.
- The last form, which creates a new `Unit`, is described in detail
below.
See also: https://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, the `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, represents=None, doc=None,
format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc,
format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self._represents))
return self._hash
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers,
_error_check=False)
return unit
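# Illustrative sketch (editor's addition): defining a named unit directly
# with the ``Unit`` constructor (``def_unit`` below is the recommended
# entry point).  The name ``bakers_dozen`` is hypothetical; outputs shown
# approximately.
#
#     >>> import astropy.units as u
#     >>> ns = {}
#     >>> bd = u.Unit('bakers_dozen', represents=13 * u.one, namespace=ns)
#     >>> ns['bakers_dozen'] is bd     # injected into the given namespace
#     True
#     >>> bd.decompose()
#     Unit(dimensionless with a scale of 13.0)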
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
"""
_decomposed_cache = None
def __init__(self, scale, bases, powers, decompose=False,
decompose_bases=set(), _error_check=True):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError(
"bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
if not decompose and len(bases) == 1 and powers[0] >= 0:
# Short-cut; with one unit there's nothing to expand and gather,
# as that has happened already when creating the unit. But do only
# positive powers, since for negative powers we need to re-sort.
unit = bases[0]
power = powers[0]
if power == 1:
scale *= unit.scale
self._bases = unit.bases
self._powers = unit.powers
elif power == 0:
self._bases = []
self._powers = []
else:
scale *= unit.scale ** power
self._bases = unit.bases
self._powers = [operator.mul(*resolve_fractions(p, power))
for p in unit.powers]
self._scale = sanitize_scale(scale)
else:
# Regular case: use inputs as preliminary scale, bases, and powers,
# then "expand and gather" identical bases, sanitize the scale, &c.
self._scale = scale
self._bases = bases
self._powers = powers
self._expand_and_gather(decompose=decompose,
bases=decompose_bases)
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return f'Unit(dimensionless with a scale of {self._scale})'
else:
return 'Unit(dimensionless)'
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale ** p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], 'name', '')))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if (not isinstance(base, IrreducibleUnit) or
(len(bases) and base not in bases)):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(self.scale, self.bases, self.powers, decompose=True,
decompose_bases=bases)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
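# Illustrative sketch (editor's addition): ``CompositeUnit`` is normally
# reached through arithmetic on units rather than constructed directly.
# Outputs shown approximately.
#
#     >>> import astropy.units as u
#     >>> speed = u.km / u.s
#     >>> speed.scale, speed.bases, speed.powers
#     (1.0, [Unit("km"), Unit("s")], [1, -1])
#     >>> speed.decompose()            # expand-and-gather into irreducibles
#     Unit("1000 m / s")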
si_prefixes = [
(['Y'], ['yotta'], 1e24),
(['Z'], ['zetta'], 1e21),
(['E'], ['exa'], 1e18),
(['P'], ['peta'], 1e15),
(['T'], ['tera'], 1e12),
(['G'], ['giga'], 1e9),
(['M'], ['mega'], 1e6),
(['k'], ['kilo'], 1e3),
(['h'], ['hecto'], 1e2),
(['da'], ['deka', 'deca'], 1e1),
(['d'], ['deci'], 1e-1),
(['c'], ['centi'], 1e-2),
(['m'], ['milli'], 1e-3),
(['u'], ['micro'], 1e-6),
(['n'], ['nano'], 1e-9),
(['p'], ['pico'], 1e-12),
(['f'], ['femto'], 1e-15),
(['a'], ['atto'], 1e-18),
(['z'], ['zepto'], 1e-21),
(['y'], ['yocto'], 1e-24)
]
binary_prefixes = [
(['Ki'], ['kibi'], 2. ** 10),
(['Mi'], ['mebi'], 2. ** 20),
(['Gi'], ['gibi'], 2. ** 30),
(['Ti'], ['tebi'], 2. ** 40),
(['Pi'], ['pebi'], 2. ** 50),
(['Ei'], ['exbi'], 2. ** 60)
]
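# Illustrative sketch (editor's addition): each prefix entry above is
# ``(short_names, long_names, factor)``; ``_add_prefixes`` below combines
# these with a unit's own names.  For instance, assuming the data units
# that enable binary prefixes (outputs shown approximately):
#
#     >>> import astropy.units as u
#     >>> (1 * u.Kibyte).to(u.byte)
#     <Quantity 1024. byte>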
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
(short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == 'u':
format['latex'] = r'\mu ' + u.get_format_name('latex')
format['unicode'] = '\N{MICRO SIGN}' + u.get_format_name('unicode')
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(names, CompositeUnit(factor, [u], [1],
_error_check=False),
namespace=namespace, format=format)
def def_unit(s, represents=None, doc=None, format=None, prefixes=False,
exclude_prefixes=[], namespace=None):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, the `format` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
unit as well. For example, for a given unit ``m``, will
generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
(short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `~astropy.units.UnitBase`
The newly-defined unit, or a matching unit that was already
defined.
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc,
format=format)
else:
result = IrreducibleUnit(
s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(result, excludes=exclude_prefixes, namespace=namespace,
prefixes=prefixes)
return result
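# Illustrative sketch (editor's addition): typical ``def_unit`` usage.
# The unit name is hypothetical; outputs shown approximately.
#
#     >>> import astropy.units as u
#     >>> ns = {}
#     >>> fortnight = u.def_unit('fortnight', 14 * u.day, namespace=ns,
#     ...                        prefixes=True)
#     >>> (1 * fortnight).to(u.day)
#     <Quantity 14. day>
#     >>> 'mfortnight' in ns           # prefixed aliases were injected too
#     True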
def _condition_arg(value):
"""
Validate that a value is acceptable for conversion purposes.
Will convert the value into an array if it is not a scalar and can
be converted into an array.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
"""
if isinstance(value, (np.ndarray, float, int, complex, np.void)):
return value
avalue = np.array(value)
if avalue.dtype.kind not in ['i', 'f', 'c']:
raise ValueError("Value not scalar compatible or convertible to "
"an int, float, or complex array")
return avalue
def unit_scale_converter(val):
"""Function that just multiplies the value by unity.
This is a separate function so it can be recognized and
discarded in unit conversion.
"""
return 1. * _condition_arg(val)
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
219e35036e67c0c0459caf1f3d88d2eca18d0df1fbdd0eea05c5021f6468202a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines SI prefixed units that are required by the VOUnit standard
but that are rarely used in practice and liable to lead to confusion (such as
``msolMass`` for milli-solar mass). They are in a separate module from
`astropy.units.deprecated` because they need to be enabled by default for
`astropy.units` to parse compliant VOUnit strings. As a result, e.g.,
``Unit('msolMass')`` will just work, but to access the unit directly, use
``astropy.units.required_by_vounit.msolMass`` instead of the more typical idiom
possible for the non-prefixed unit, ``astropy.units.solMass``.
"""
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
from . import astrophys, cgs
from .core import _add_prefixes, def_unit
_add_prefixes(astrophys.solMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.solRad, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.solLum, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
__doc__ += _generate_prefixonly_unit_summary(globals())
def _enable():
"""
Enable the VOUnit-required extra units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')``
idiom.
"""
# Local imports to avoid a cyclical import and namespace pollution
import inspect
from .core import add_enabled_units
return add_enabled_units(inspect.getmodule(_enable))
# Because these are VOUnit mandated units, they start enabled (which is why the
# function is hidden).
_enable()
|
b87249b76d9cd3855bb3a1c11f023eda9852c2e30e13f71d270f551f41582d8c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the astrophysics-specific units. They are also
available in the `astropy.units` namespace.
"""
from astropy.constants import si as _si
from . import si
from .core import UnitBase, binary_prefixes, def_unit, set_enabled_units, si_prefixes
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# LENGTH
def_unit((['AU', 'au'], ['astronomical_unit']), _si.au, namespace=_ns, prefixes=True,
doc="astronomical unit: approximately the mean Earth--Sun "
"distance.")
def_unit(['pc', 'parsec'], _si.pc, namespace=_ns, prefixes=True,
doc="parsec: approximately 3.26 light-years.")
def_unit(['solRad', 'R_sun', 'Rsun'], _si.R_sun, namespace=_ns,
doc="Solar radius", prefixes=False,
format={'latex': r'R_{\odot}', 'unicode': 'R\N{SUN}'})
def_unit(['jupiterRad', 'R_jup', 'Rjup', 'R_jupiter', 'Rjupiter'],
_si.R_jup, namespace=_ns, prefixes=False, doc="Jupiter radius",
# LaTeX jupiter symbol requires wasysym
format={'latex': r'R_{\rm J}', 'unicode': 'R\N{JUPITER}'})
def_unit(['earthRad', 'R_earth', 'Rearth'], _si.R_earth, namespace=_ns,
prefixes=False, doc="Earth radius",
# LaTeX earth symbol requires wasysym
format={'latex': r'R_{\oplus}', 'unicode': 'R⊕'})
def_unit(['lyr', 'lightyear'], (_si.c * si.yr).to(si.m),
namespace=_ns, prefixes=True, doc="Light year")
def_unit(['lsec', 'lightsecond'], (_si.c * si.s).to(si.m),
namespace=_ns, prefixes=False, doc="Light second")
###########################################################################
# MASS
def_unit(['solMass', 'M_sun', 'Msun'], _si.M_sun, namespace=_ns,
prefixes=False, doc="Solar mass",
format={'latex': r'M_{\odot}', 'unicode': 'M\N{SUN}'})
def_unit(['jupiterMass', 'M_jup', 'Mjup', 'M_jupiter', 'Mjupiter'],
_si.M_jup, namespace=_ns, prefixes=False, doc="Jupiter mass",
# LaTeX jupiter symbol requires wasysym
format={'latex': r'M_{\rm J}', 'unicode': 'M\N{JUPITER}'})
def_unit(['earthMass', 'M_earth', 'Mearth'], _si.M_earth, namespace=_ns,
prefixes=False, doc="Earth mass",
# LaTeX earth symbol requires wasysym
format={'latex': r'M_{\oplus}', 'unicode': 'M⊕'})
##########################################################################
# ENERGY
# Here, explicitly convert the planck constant to 'eV s' since the constant
# can override that to give a more precise value that takes into account
# covariances between e and h. Eventually, this may also be replaced with
# just `_si.Ryd.to(eV)`.
def_unit(['Ry', 'rydberg'],
(_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV),
namespace=_ns, prefixes=True,
doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg "
"constant",
format={'latex': r'R_{\infty}', 'unicode': 'R∞'})
###########################################################################
# ILLUMINATION
def_unit(['solLum', 'L_sun', 'Lsun'], _si.L_sun, namespace=_ns,
prefixes=False, doc="Solar luminance",
format={'latex': r'L_{\odot}', 'unicode': 'L\N{SUN}'})
###########################################################################
# SPECTRAL DENSITY
def_unit((['ph', 'photon'], ['photon']),
format={'ogip': 'photon', 'vounit': 'photon'},
namespace=_ns, prefixes=True)
def_unit(['Jy', 'Jansky', 'jansky'], 1e-26 * si.W / si.m ** 2 / si.Hz,
namespace=_ns, prefixes=True,
doc="Jansky: spectral flux density")
def_unit(['R', 'Rayleigh', 'rayleigh'],
(1e10 / (4 * _numpy.pi)) *
ph * si.m ** -2 * si.s ** -1 * si.sr ** -1,
namespace=_ns, prefixes=True,
doc="Rayleigh: photon flux")
###########################################################################
# EVENTS
def_unit((['ct', 'count'], ['count']),
format={'fits': 'count', 'ogip': 'count', 'vounit': 'count'},
namespace=_ns, prefixes=True, exclude_prefixes=['p'])
def_unit(['adu'], namespace=_ns, prefixes=True)
def_unit(['DN', 'dn'], namespace=_ns, prefixes=False)
###########################################################################
# MISCELLANEOUS
# Some of these are very FITS-specific and perhaps considered a mistake.
# Maybe they should be moved into the FITS format class?
# TODO: This is defined by the FITS standard as "relative to the sun".
# Is that mass, volume, what?
def_unit(['Sun'], namespace=_ns)
def_unit(['chan'], namespace=_ns, prefixes=True)
def_unit(['bin'], namespace=_ns, prefixes=True)
def_unit(['beam'], namespace=_ns, prefixes=True)
def_unit(['electron'], doc="Number of electrons", namespace=_ns,
format={'latex': r'e^{-}', 'unicode': 'e⁻'})
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
# -------------------------------------------------------------------------
def __getattr__(attr):
if attr == "littleh":
import warnings
from astropy.cosmology.units import littleh
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
("`littleh` is deprecated from module `astropy.units.astrophys` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.littleh` instead."),
AstropyDeprecationWarning)
return littleh
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
0a09058c235717f4561587871b03f2733d0e77d5ddb1a8eb3dd543a5365c3ac1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Miscellaneous utilities for `astropy.units`.
None of the functions in the module are meant for use outside of the
package.
"""
import io
import re
from fractions import Fraction
import numpy as np
from numpy import finfo
_float_finfo = finfo(float)
# take float here to ensure comparison with another float is fast
# give a little margin since often multiple calculations happened
_JUST_BELOW_UNITY = float(1.-4.*_float_finfo.epsneg)
_JUST_ABOVE_UNITY = float(1.+4.*_float_finfo.eps)
def _get_first_sentence(s):
"""
Get the first sentence from a string and remove any carriage
returns.
"""
x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace('\n', ' ')
def _iter_unit_summary(namespace):
"""
Generates the ``(unit, doc, represents, aliases, prefixes)``
tuple used to format the unit summary docs in `generate_unit_summary`.
"""
from . import core
# Get all of the units, and keep track of which ones have SI
# prefixes
units = []
has_prefixes = set()
for key, val in namespace.items():
# Skip non-unit items
if not isinstance(val, core.UnitBase):
continue
# Skip aliases
if key != val.name:
continue
if isinstance(val, core.PrefixUnit):
# This will return the root unit that is scaled by the prefix
# attached to it
has_prefixes.add(val._represents.bases[0].name)
else:
units.append(val)
# Sort alphabetically, case insensitive
units.sort(key=lambda x: x.name.lower())
for unit in units:
doc = _get_first_sentence(unit.__doc__).strip()
represents = ''
if isinstance(unit, core.Unit):
represents = f":math:`{unit._represents.to_string('latex')[1:-1]}`"
aliases = ', '.join(f'``{x}``' for x in unit.aliases)
yield (unit, doc, represents, aliases, 'Yes' if unit.name in has_prefixes else 'No')
def generate_unit_summary(namespace):
"""
Generates a summary of units from a given namespace. This is used
to generate the docstring for the modules that define the actual
units.
Parameters
----------
namespace : dict
A namespace containing units.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
docstring = io.StringIO()
docstring.write("""
.. list-table:: Available Units
:header-rows: 1
:widths: 10 20 20 20 1
* - Unit
- Description
- Represents
- Aliases
- SI Prefixes
""")
for unit_summary in _iter_unit_summary(namespace):
docstring.write("""
* - ``{}``
- {}
- {}
- {}
- {}
""".format(*unit_summary))
return docstring.getvalue()
def generate_prefixonly_unit_summary(namespace):
"""
Generates table entries for units in a namespace that are just prefixes
without the base unit. Note that this is intended to be used *after*
`generate_unit_summary` and therefore does not include the table header.
Parameters
----------
namespace : dict
A namespace containing units that are prefixes but do *not* have the
base unit in their namespace.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
from . import PrefixUnit
faux_namespace = {}
for nm, unit in namespace.items():
if isinstance(unit, PrefixUnit):
base_unit = unit.represents.bases[0]
faux_namespace[base_unit.name] = base_unit
docstring = io.StringIO()
for unit_summary in _iter_unit_summary(faux_namespace):
docstring.write("""
* - Prefixes for ``{}``
- {} prefixes
- {}
- {}
- Only
""".format(*unit_summary))
return docstring.getvalue()
def is_effectively_unity(value):
# value is *almost* always real, except, e.g., for u.mag**0.5, when
# it will be complex. Use try/except to ensure normal case is fast
try:
return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
except TypeError: # value is complex
return (_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY and
_JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY)
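# Illustrative sketch (editor's addition): the tolerance is a few ulp
# around 1.0, using the margins defined at the top of this module.
# Outputs shown approximately.
#
#     >>> from astropy.units.utils import is_effectively_unity
#     >>> is_effectively_unity(1.0)
#     True
#     >>> is_effectively_unity(1 + 2e-16)   # within the 4-ulp margin
#     True
#     >>> is_effectively_unity(1.0001)
#     False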
def sanitize_scale(scale):
if is_effectively_unity(scale):
return 1.0
# Maximum speed for regular case where scale is a float.
if scale.__class__ is float:
return scale
# We cannot have numpy scalars, since they don't autoconvert to
# complex if necessary. They are also slower.
if hasattr(scale, 'dtype'):
scale = scale.item()
# All classes that scale can be (int, float, complex, Fraction)
# have an "imag" attribute.
if scale.imag:
if abs(scale.real) > abs(scale.imag):
if is_effectively_unity(scale.imag/scale.real + 1):
return scale.real
elif is_effectively_unity(scale.real/scale.imag + 1):
return complex(0., scale.imag)
return scale
else:
return scale.real
def maybe_simple_fraction(p, max_denominator=100):
"""Fraction very close to x with denominator at most max_denominator.
The fraction has to be such that fraction/x is unity to within 4 ulp.
If such a fraction does not exist, returns the float number.
The algorithm is that of `fractions.Fraction.limit_denominator`, but
sped up by not creating a fraction to start with.
"""
if p == 0 or p.__class__ is int:
return p
n, d = p.as_integer_ratio()
a = n // d
# Normally, start with 0,1 and 1,0; here we have applied first iteration.
n0, d0 = 1, 0
n1, d1 = a, 1
while d1 <= max_denominator:
if _JUST_BELOW_UNITY <= n1/(d1*p) <= _JUST_ABOVE_UNITY:
return Fraction(n1, d1)
n, d = d, n-a*d
a = n // d
n0, n1 = n1, n0+a*n1
d0, d1 = d1, d0+a*d1
return p
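# Illustrative sketch (editor's addition): recovering simple fractions
# from floats, or handing the float back unchanged.  Outputs shown
# approximately.
#
#     >>> from astropy.units.utils import maybe_simple_fraction
#     >>> maybe_simple_fraction(1. / 3.)
#     Fraction(1, 3)
#     >>> maybe_simple_fraction(0.12345678)   # no small denominator fits
#     0.12345678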
def validate_power(p):
"""Convert a power to a floating point value, an integer, or a Fraction.
If a fractional power can be represented exactly as a floating point
number, convert it to a float, to make the math much faster; otherwise,
retain it as a `fractions.Fraction` object to avoid losing precision.
Conversely, if the value is indistinguishable from a rational number
with a small denominator, convert it to a Fraction object.
Parameters
----------
p : float, int, Rational, Fraction
Power to be converted
"""
denom = getattr(p, 'denominator', None)
if denom is None:
try:
p = float(p)
except Exception:
if not np.isscalar(p):
raise ValueError("Quantities and Units may only be raised "
"to a scalar power")
else:
raise
# This returns either a (simple) Fraction or the same float.
p = maybe_simple_fraction(p)
# If still a float, nothing more to be done.
if isinstance(p, float):
return p
# Otherwise, check for simplifications.
denom = p.denominator
if denom == 1:
p = p.numerator
elif (denom & (denom - 1)) == 0:
# Above is a bit-twiddling hack to see if denom is a power of two.
# If so, float does not lose precision and will speed things up.
p = float(p)
return p
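# Illustrative sketch (editor's addition): the three possible outcomes of
# ``validate_power``.  Outputs shown approximately.
#
#     >>> from fractions import Fraction
#     >>> from astropy.units.utils import validate_power
#     >>> validate_power(0.5)             # denominator 2 is a power of two -> float
#     0.5
#     >>> validate_power(Fraction(1, 3))  # kept exact as a Fraction
#     Fraction(1, 3)
#     >>> validate_power(Fraction(4, 2))  # denominator 1 -> plain int
#     2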
def resolve_fractions(a, b):
"""
If either input is a Fraction, convert the other to a Fraction
(at least if it does not have a ridiculous denominator).
This ensures that any operation involving a Fraction will use
rational arithmetic and preserve precision.
"""
# We short-circuit on the most common cases of int and float, since
# isinstance(a, Fraction) is very slow for any non-Fraction instances.
a_is_fraction = (a.__class__ is not int and a.__class__ is not float and
isinstance(a, Fraction))
b_is_fraction = (b.__class__ is not int and b.__class__ is not float and
isinstance(b, Fraction))
if a_is_fraction and not b_is_fraction:
b = maybe_simple_fraction(b)
elif not a_is_fraction and b_is_fraction:
a = maybe_simple_fraction(a)
return a, b
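# Illustrative sketch (editor's addition): mixed Fraction/float inputs are
# made consistent so rational arithmetic is preserved.  Outputs shown
# approximately.
#
#     >>> from fractions import Fraction
#     >>> from astropy.units.utils import resolve_fractions
#     >>> resolve_fractions(Fraction(1, 3), 0.5)
#     (Fraction(1, 3), Fraction(1, 2))
#     >>> resolve_fractions(2, 3.0)            # no Fraction involved: unchanged
#     (2, 3.0)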
def quantity_asanyarray(a, dtype=None):
from .quantity import Quantity
if not isinstance(a, np.ndarray) and not np.isscalar(a) and any(isinstance(x, Quantity) for x in a):
return Quantity(a, dtype=dtype)
else:
# Map np.inexact to float64 to skip over a numpy dtype deprecation.
dtype = np.float64 if dtype is np.inexact else dtype
return np.asanyarray(a, dtype=dtype)
|
b5e303b014b3ccadc9d0d04f1792954aa5814f945b58539e5aee858be8fb1b52 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the CGS units. They are also available in the
top-level `astropy.units` namespace.
"""
from fractions import Fraction
from . import si
from .core import UnitBase, def_unit
_ns = globals()
def_unit(['cm', 'centimeter'], si.cm, namespace=_ns, prefixes=False)
g = si.g
s = si.s
C = si.C
rad = si.rad
sr = si.sr
cd = si.cd
K = si.K
deg_C = si.deg_C
mol = si.mol
##########################################################################
# ACCELERATION
def_unit(['Gal', 'gal'], cm / s ** 2, namespace=_ns, prefixes=True,
doc="Gal: CGS unit of acceleration")
##########################################################################
# ENERGY
# Use CGS definition of erg
def_unit(['erg'], g * cm ** 2 / s ** 2, namespace=_ns, prefixes=True,
doc="erg: CGS unit of energy")
##########################################################################
# FORCE
def_unit(['dyn', 'dyne'], g * cm / s ** 2, namespace=_ns,
prefixes=True,
doc="dyne: CGS unit of force")
##########################################################################
# PRESSURE
def_unit(['Ba', 'Barye', 'barye'], g / (cm * s ** 2), namespace=_ns,
prefixes=True,
doc="Barye: CGS unit of pressure")
##########################################################################
# DYNAMIC VISCOSITY
def_unit(['P', 'poise'], g / (cm * s), namespace=_ns,
prefixes=True,
doc="poise: CGS unit of dynamic viscosity")
##########################################################################
# KINEMATIC VISCOSITY
def_unit(['St', 'stokes'], cm ** 2 / s, namespace=_ns,
prefixes=True,
doc="stokes: CGS unit of kinematic viscosity")
##########################################################################
# WAVENUMBER
def_unit(['k', 'Kayser', 'kayser'], cm ** -1, namespace=_ns,
prefixes=True,
doc="kayser: CGS unit of wavenumber")
###########################################################################
# ELECTRICAL
def_unit(['D', 'Debye', 'debye'], Fraction(1, 3) * 1e-29 * C * si.m,
namespace=_ns, prefixes=True,
doc="Debye: CGS unit of electric dipole moment")
def_unit(['Fr', 'Franklin', 'statcoulomb', 'statC', 'esu'],
g ** Fraction(1, 2) * cm ** Fraction(3, 2) * s ** -1,
namespace=_ns,
doc='Franklin: CGS (ESU) unit of charge')
def_unit(['statA', 'statampere'], Fr * s ** -1, namespace=_ns,
doc='statampere: CGS (ESU) unit of current')
def_unit(['Bi', 'Biot', 'abA', 'abampere'],
g ** Fraction(1, 2) * cm ** Fraction(1, 2) * s ** -1, namespace=_ns,
doc='Biot: CGS (EMU) unit of current')
def_unit(['abC', 'abcoulomb'], Bi * s, namespace=_ns,
doc='abcoulomb: CGS (EMU) of charge')
###########################################################################
# MAGNETIC
def_unit(['G', 'Gauss', 'gauss'], 1e-4 * si.T, namespace=_ns, prefixes=True,
doc="Gauss: CGS unit for magnetic field")
def_unit(['Mx', 'Maxwell', 'maxwell'], 1e-8 * si.Wb, namespace=_ns,
doc="Maxwell: CGS unit for magnetic flux")
###########################################################################
# BASES
bases = {cm, g, s, rad, cd, K, mol}
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
del Fraction
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
d6e5d3cc90bcf3c8a4ac50e47fe0d50bf8f55787bf45f4e54d4be3cc10a5a835 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines deprecated units.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.deprecated`
module::
>>> from astropy.units import deprecated
>>> q = 10. * deprecated.emu # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import deprecated
>>> deprecated.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
from . import astrophys, cgs
from .core import _add_prefixes, def_unit
def_unit(['emu'], cgs.Bi, namespace=_ns,
doc='Biot: CGS (EMU) unit of current')
# Add only some *prefixes* as deprecated units.
_add_prefixes(astrophys.jupiterMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.earthMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.jupiterRad, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.earthRad, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
__doc__ += _generate_prefixonly_unit_summary(globals())
def enable():
"""
Enable deprecated units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`.
This may be used with the ``with`` statement to enable deprecated
units only temporarily.
"""
import inspect
# Local imports to avoid a cyclical import and namespace pollution
from .core import add_enabled_units
return add_enabled_units(inspect.getmodule(enable))
|
2c521c1cdd85b63fa69f00ba234d7c86ff54ce102d8d694c02102c7196deee4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines structured units and quantities.
"""
# Standard library
import operator
import numpy as np
from .core import UNITY, Unit, UnitBase
__all__ = ['StructuredUnit']
DTYPE_OBJECT = np.dtype('O')
def _names_from_dtype(dtype):
"""Recursively extract field names from a dtype."""
names = []
for name in dtype.names:
subdtype = dtype.fields[name][0]
if subdtype.names:
names.append([name, _names_from_dtype(subdtype)])
else:
names.append(name)
return tuple(names)
def _normalize_names(names):
"""Recursively normalize, inferring upper level names for unadorned tuples.
Generally, we want the field names to be organized like dtypes, as in
``(['pv', ('p', 'v')], 't')``. But we automatically infer upper
field names if the list is absent from items like ``(('p', 'v'), 't')``,
by concatenating the names inside the tuple.
"""
result = []
for name in names:
if isinstance(name, str) and len(name) > 0:
result.append(name)
elif (isinstance(name, list)
and len(name) == 2
and isinstance(name[0], str) and len(name[0]) > 0
and isinstance(name[1], tuple) and len(name[1]) > 0):
result.append([name[0], _normalize_names(name[1])])
elif isinstance(name, tuple) and len(name) > 0:
new_tuple = _normalize_names(name)
result.append([''.join([(i[0] if isinstance(i, list) else i)
for i in new_tuple]), new_tuple])
else:
raise ValueError(f'invalid entry {name!r}. Should be a name, '
'tuple of names, or 2-element list of the '
'form [name, tuple of names].')
return tuple(result)
class StructuredUnit:
"""Container for units for a structured Quantity.
Parameters
----------
units : unit-like, tuple of unit-like, or `~astropy.units.StructuredUnit`
Tuples can be nested. If a `~astropy.units.StructuredUnit` is passed
in, it will be returned unchanged unless different names are requested.
names : tuple of str, tuple or list; `~numpy.dtype`; or `~astropy.units.StructuredUnit`, optional
Field names for the units, possibly nested. Can be inferred from a
structured `~numpy.dtype` or another `~astropy.units.StructuredUnit`.
For nested tuples, by default the name of the upper entry will be the
concatenation of the names of the lower levels. One can pass in a
list with the upper-level name and a tuple of lower-level names to
avoid this. For tuples, not all levels have to be given; for any level
not passed in, default field names of 'f0', 'f1', etc., will be used.
Notes
-----
It is recommended to initialize the class indirectly, using
`~astropy.units.Unit`. E.g., ``u.Unit('AU,AU/day')``.
When combined with a structured array to produce a structured
`~astropy.units.Quantity`, array field names will take precedence.
Generally, passing in ``names`` is needed only if the unit is used
unattached to a `~astropy.units.Quantity` and one needs to access its
fields.
Examples
--------
Various ways to initialize a `~astropy.units.StructuredUnit`::
>>> import astropy.units as u
>>> su = u.Unit('(AU,AU/day),yr')
>>> su
Unit("((AU, AU / d), yr)")
>>> su.field_names
(['f0', ('f0', 'f1')], 'f1')
>>> su['f1']
Unit("yr")
>>> su2 = u.StructuredUnit(((u.AU, u.AU/u.day), u.yr), names=(('p', 'v'), 't'))
>>> su2 == su
True
>>> su2.field_names
(['pv', ('p', 'v')], 't')
>>> su3 = u.StructuredUnit((su2['pv'], u.day), names=(['p_v', ('p', 'v')], 't'))
>>> su3.field_names
(['p_v', ('p', 'v')], 't')
>>> su3.keys()
('p_v', 't')
>>> su3.values()
(Unit("(AU, AU / d)"), Unit("d"))
Structured units share most methods with regular units::
>>> su.physical_type
((PhysicalType('length'), PhysicalType({'speed', 'velocity'})), PhysicalType('time'))
>>> su.si
Unit("((1.49598e+11 m, 1.73146e+06 m / s), 3.15576e+07 s)")
"""
def __new__(cls, units, names=None):
dtype = None
if names is not None:
if isinstance(names, StructuredUnit):
dtype = names._units.dtype
names = names.field_names
elif isinstance(names, np.dtype):
if not names.fields:
raise ValueError('dtype should be structured, with fields.')
dtype = np.dtype([(name, DTYPE_OBJECT) for name in names.names])
names = _names_from_dtype(names)
else:
if not isinstance(names, tuple):
names = (names,)
names = _normalize_names(names)
if not isinstance(units, tuple):
units = Unit(units)
if isinstance(units, StructuredUnit):
# Avoid constructing a new StructuredUnit if no field names
# are given, or if all field names are the same already anyway.
if names is None or units.field_names == names:
return units
# Otherwise, turn (the upper level) into a tuple, for renaming.
units = units.values()
else:
# Single regular unit: make a tuple for iteration below.
units = (units,)
if names is None:
names = tuple(f'f{i}' for i in range(len(units)))
elif len(units) != len(names):
raise ValueError("lengths of units and field names must match.")
converted = []
for unit, name in zip(units, names):
if isinstance(name, list):
# For list, the first item is the name of our level,
# and the second another tuple of names, i.e., we recurse.
unit = cls(unit, name[1])
name = name[0]
else:
# We are at the lowest level. Check unit.
unit = Unit(unit)
if dtype is not None and isinstance(unit, StructuredUnit):
raise ValueError("units do not match in depth with field "
"names from dtype or structured unit.")
converted.append(unit)
self = super().__new__(cls)
if dtype is None:
dtype = np.dtype([((name[0] if isinstance(name, list) else name),
DTYPE_OBJECT) for name in names])
# Decay array to void so we can access by field name and number.
self._units = np.array(tuple(converted), dtype)[()]
return self
def __getnewargs__(self):
"""When de-serializing, e.g. pickle, start with a blank structure."""
return (), None
@property
def field_names(self):
"""Possibly nested tuple of the field names of the parts."""
return tuple(([name, unit.field_names]
if isinstance(unit, StructuredUnit) else name)
for name, unit in self.items())
# Allow StructuredUnit to be treated as an (ordered) mapping.
def __len__(self):
return len(self._units.dtype.names)
def __getitem__(self, item):
# Since we are based on np.void, indexing by field number works too.
return self._units[item]
def values(self):
return self._units.item()
def keys(self):
return self._units.dtype.names
def items(self):
return tuple(zip(self._units.dtype.names, self._units.item()))
def __iter__(self):
yield from self._units.dtype.names
# Helpers for methods below.
def _recursively_apply(self, func, cls=None):
"""Apply func recursively.
Parameters
----------
func : callable
Function to apply to all parts of the structured unit,
recursing as needed.
cls : type, optional
If given, should be a subclass of `~numpy.void`. By default,
will return a new `~astropy.units.StructuredUnit` instance.
"""
results = np.array(tuple(func(part) for part in self.values()),
self._units.dtype)[()]
if cls is not None:
return results.view((cls, results.dtype))
# Short-cut; no need to interpret field names, etc.
result = super().__new__(self.__class__)
result._units = results
return result
def _recursively_get_dtype(self, value, enter_lists=True):
"""Get structured dtype according to value, using our field names.
This is useful since ``np.array(value)`` would treat tuples as lower
levels of the array, rather than as elements of a structured array.
The routine does presume that the type of the first tuple is
representative of the rest. Used in ``_get_converter``.
For the special value of ``UNITY``, all fields are assumed to be 1.0,
and hence this will return an all-float dtype.
"""
if enter_lists:
while isinstance(value, list):
value = value[0]
if value is UNITY:
value = (UNITY,) * len(self)
elif not isinstance(value, tuple) or len(self) != len(value):
raise ValueError(f"cannot interpret value {value} for unit {self}.")
descr = []
for (name, unit), part in zip(self.items(), value):
if isinstance(unit, StructuredUnit):
descr.append(
(name, unit._recursively_get_dtype(part, enter_lists=False)))
else:
# Got a part associated with a regular unit. Gets its dtype.
# Like for Quantity, we cast integers to float.
part = np.array(part)
part_dtype = part.dtype
if part_dtype.kind in 'iu':
part_dtype = np.dtype(float)
descr.append((name, part_dtype, part.shape))
return np.dtype(descr)
@property
def si(self):
"""The `StructuredUnit` instance in SI units."""
return self._recursively_apply(operator.attrgetter('si'))
@property
def cgs(self):
"""The `StructuredUnit` instance in cgs units."""
return self._recursively_apply(operator.attrgetter('cgs'))
# Needed to pass through Unit initializer, so might as well use it.
def _get_physical_type_id(self):
return self._recursively_apply(
operator.methodcaller('_get_physical_type_id'), cls=Structure)
@property
def physical_type(self):
"""Physical types of all the fields."""
return self._recursively_apply(
operator.attrgetter('physical_type'), cls=Structure)
def decompose(self, bases=set()):
"""The `StructuredUnit` composed of only irreducible units.
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
`~astropy.units.StructuredUnit`
With the unit for each field containing only irreducible units.
"""
return self._recursively_apply(
operator.methodcaller('decompose', bases=bases))
def is_equivalent(self, other, equivalencies=[]):
"""`True` if all fields are equivalent to the other's fields.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The structured unit to compare with, or what can initialize one.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
The list will be applied to all fields.
Returns
-------
bool
"""
try:
other = StructuredUnit(other)
except Exception:
return False
if len(self) != len(other):
return False
for self_part, other_part in zip(self.values(), other.values()):
if not self_part.is_equivalent(other_part,
equivalencies=equivalencies):
return False
return True
def _get_converter(self, other, equivalencies=[]):
if not isinstance(other, type(self)):
other = self.__class__(other, names=self)
converters = [self_part._get_converter(other_part,
equivalencies=equivalencies)
for (self_part, other_part) in zip(self.values(),
other.values())]
def converter(value):
if not hasattr(value, 'dtype'):
value = np.array(value, self._recursively_get_dtype(value))
result = np.empty_like(value)
for name, converter_ in zip(result.dtype.names, converters):
result[name] = converter_(value[name])
# Index with empty tuple to decay array scalars to numpy void.
return result if result.shape else result[()]
return converter
def to(self, other, value=np._NoValue, equivalencies=[]):
"""Return values converted to the specified unit.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The unit to convert to. If necessary, will be converted to
a `~astropy.units.StructuredUnit` using the dtype of ``value``.
value : array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If a sequence, the first element must have
entries of the correct type to represent all elements (i.e.,
not have, e.g., a ``float`` where other elements have ``complex``).
If not given, assumed to have 1. in all fields.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s).
Raises
------
UnitsError
If units are inconsistent
"""
if value is np._NoValue:
# We do not have UNITY as a default, since then the docstring
# would list 1.0 as default, yet one could not pass that in.
value = UNITY
return self._get_converter(other, equivalencies=equivalencies)(value)
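# Illustrative sketch (editor's addition): converting structured values
# field by field with ``to``.  Outputs shown approximately (the result is
# a structured numpy scalar).
#
#     >>> import astropy.units as u
#     >>> su = u.Unit('km,km/s')
#     >>> su.to(u.Unit('m,m/s'), (1., 2.))
#     (1000., 2000.)
#     >>> su.to(u.Unit('m,m/s'))               # default: 1. in every field
#     (1000., 1000.)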
def to_string(self, format='generic'):
"""Output the unit in the given format as a string.
Units are separated by commas.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
Notes
-----
Structured units can be written to all formats, but can be
re-read only with 'generic'.
"""
parts = [part.to_string(format) for part in self.values()]
out_fmt = '({})' if len(self) > 1 else '({},)'
if format.startswith('latex'):
# Strip $ from parts and add them on the outside.
parts = [part[1:-1] for part in parts]
out_fmt = '$' + out_fmt + '$'
return out_fmt.format(', '.join(parts))
def _repr_latex_(self):
return self.to_string('latex')
__array_ufunc__ = None
def __mul__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict='silent')
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part * other for part in self.values())
return self.__class__(new_units, names=self)
if isinstance(other, StructuredUnit):
return NotImplemented
# Anything not like a unit, try initialising as a structured quantity.
try:
from .quantity import Quantity
return Quantity(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict='silent')
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part / other for part in self.values())
return self.__class__(new_units, names=self)
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __str__(self):
return self.to_string()
def __repr__(self):
return f'Unit("{self.to_string()}")'
def __eq__(self, other):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() == other.values()
def __ne__(self, other):
if not isinstance(other, type(self)):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() != other.values()
class Structure(np.void):
"""Single element structure for physical type IDs, etc.
Behaves like a `~numpy.void` and thus mostly like a tuple which can also
be indexed with field names, but overrides ``__eq__`` and ``__ne__`` to
compare only the contents, not the field names. Furthermore, this way no
`FutureWarning` about comparisons is given.
"""
# Note that it is important for physical type IDs to not be stored in a
# tuple, since then the physical types would be treated as alternatives in
# :meth:`~astropy.units.UnitBase.is_equivalent`. (Of course, in that
# case, they could also not be indexed by name.)
def __eq__(self, other):
if isinstance(other, np.void):
other = other.item()
return self.item() == other
def __ne__(self, other):
if isinstance(other, np.void):
other = other.item()
return self.item() != other
|
1c0deebb215169b1574351866361d08be5c44a4b3cd0aab54c36dff00cc3931f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# STDLIB
import numbers
import operator
import re
import warnings
from fractions import Fraction
# THIRD PARTY
import numpy as np
# LOCAL
from astropy import config as _config
from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_22
from astropy.utils.compat.misc import override__dir__
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.misc import isiterable
from .core import (
Unit, UnitBase, UnitConversionError, UnitsError, UnitTypeError, dimensionless_unscaled,
get_current_unit_registry)
from .format.latex import Latex
from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit
from .quantity_helper.function_helpers import (
DISPATCHED_FUNCTIONS, FUNCTION_HELPERS, SUBCLASS_SAFE_FUNCTIONS, UNSUPPORTED_FUNCTIONS)
from .structured import StructuredUnit
from .utils import is_effectively_unity
__all__ = ["Quantity", "SpecificTypeQuantity",
"QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity
"""
latex_array_threshold = _config.ConfigItem(100,
'The maximum size an array Quantity can be before its LaTeX '
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
'negative number means that the value will instead be whatever numpy '
'gets from get_printoptions.')
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
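# Illustrative sketch (editor's addition): ``q.flat`` yields Quantity
# scalars and supports flat indexing.  Outputs shown approximately.
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> q = np.arange(4.).reshape(2, 2) * u.m
#     >>> q.flat[2]
#     <Quantity 2. m>
#     >>> list(q.flat)
#     [<Quantity 0. m>, <Quantity 1. m>, <Quantity 2. m>, <Quantity 3. m>]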
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f'{val.value}'
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('value', 'unit')
_construct_from_dict_args = ['value']
_represent_as_dict_primary_data = 'value'
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop('shape')
dtype = attrs.pop('dtype')
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {key: (data if key == 'value' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
map['copy'] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
If `None`, the normal `numpy.dtype` introspection is used, e.g.
preventing upcasting of integers.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
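Examples
--------
A minimal construction sketch (reprs are illustrative):
>>> from astropy import units as u
>>> u.Quantity(1.5, u.m)
<Quantity 1.5 m>
>>> q = [1, 2, 3] * u.s  # integers are upcast to float by default
>>> q.value, q.unit
(array([1., 2., 3.]), Unit("s"))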
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
As a classmethod, the type is the class, i.e., ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
Unit specification; can be the physical type (i.e., str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* if python v3.9+ : `typing.Annotated`
* if :mod:`typing_extensions` is installed : `typing_extensions.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
# LOCAL
from ._typing import HAS_ANNOTATED, Annotated
# process whether [unit] or [unit, shape, dtype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError("unit annotation is not a Unit or PhysicalType") from None
# Allow this to sort-of work for python 3.8- / no typing_extensions:
# instead of bailing out, return the unit for `quantity_input`.
if not HAS_ANNOTATED:
warnings.warn("Quantity annotations are valid static type annotations only"
" if Python is v3.9+ or `typing_extensions` is installed.")
return unit
# Quantity does not (yet) properly extend the NumPy generics types,
# introduced in numpy v1.22+, instead just including the unit info as
# metadata using Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated.__class_getitem__((cls, unit))
def __new__(cls, value, unit=None, dtype=np.inexact, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# inexact -> upcast to float dtype
float_default = dtype is np.inexact
if float_default:
dtype = None
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and
isinstance(value, cls)):
value = value.view(cls)
if float_default and value.dtype.kind in 'iu':
dtype = float
return np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second part adds a possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
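# For example (illustrative): "1.2e3 km" parses to the value 1200.0 with
# unit_string "km", while "1.2.3deg" makes the float() call below fail,
# raising TypeError.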
pattern = (r'\s*[+-]?'
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
r'([eE][+-]?\d+)?'
r'[.+-]?')
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError('Cannot parse "{}" as a {}. It does not '
'start with a number.'
.format(value, cls.__name__))
unit_string = v.string[v.end():].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (dtype is None and not hasattr(value, 'dtype')
and isinstance(unit, StructuredUnit)):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError("The unit attribute {!r} of the input could "
"not be parsed as an astropy Unit, raising "
"the following exception:\n{}"
.format(value.unit, exc))
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# check that array contains numbers or long int objects
if (value.dtype.kind in 'OSU' and
not (value.dtype.kind == 'O' and
isinstance(value.item(0), numbers.Number))):
raise TypeError("The value must be a valid Python or "
"Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if float_default and value.dtype.kind in 'iuO':
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, '_quantity_class', cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# Check whether super().__array_finalize should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, '_unit', None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if 'info' in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() create a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more since all use '
'should go through array_function. '
'Please raise an issue on '
'https://github.com/astropy/astropy')
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity`
Results of the ufunc, with the unit set properly.
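Examples
--------
A sketch of the resulting behaviour (assuming ``import astropy.units as u``):
>>> np.add(1. * u.km, 500. * u.m)
<Quantity 1.5 km>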
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get('out', None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs['out'] = (out_array,) if function.nout == 1 else out_array
if method == 'reduce' and 'initial' in kwargs and unit is not None:
# Special-case for initial argument for reductions like
# np.add.reduce. This should be converted to the output unit as
# well, which is typically the same as the input unit (but can
# in principle be different: unitless for np.equal, radian
# for np.arctan2, though those are not necessarily useful!)
kwargs['initial'] = self._to_own_unit(kwargs['initial'],
check_precision=False, unit=unit)
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, 'value', input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in
zip(result, unit, out))
if out is None:
# View the result array as a Quantity with the proper unit.
return result if unit is None else self._new_view(result, unit)
# For given output, just set the unit. We know the unit is not None and
# the output is of the correct Quantity subclass, as it was passed
# through check_output.
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None):
"""
Create a Quantity view of some array-like input, and set the unit
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, '_quantity_class', Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
# Note that for an ndarray input, the np.array call takes only about
# double the time of a simple ``obj.__class__ is np.ndarray`` check,
# so it is not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if (isinstance(self._unit, StructuredUnit)
or isinstance(unit, StructuredUnit)):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict='silent')
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
"{} instances require normal units, not {} instances."
.format(type(self).__name__, type(unit)))
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
# Standard path, let the unit do the work.
return self.unit.to(unit, self.view(np.ndarray),
equivalencies=equivalencies)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See also
--------
to_value : get the numerical value in a given unit.
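Examples
--------
A minimal sketch (assuming ``import astropy.units as u``):
>>> q = 100. * u.cm
>>> q.to(u.m)
<Quantity 1. m>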
"""
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
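Examples
--------
A minimal sketch (assuming ``import astropy.units as u``):
>>> (1. * u.km).to_value(u.m)
1000.0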
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
# Index with empty tuple to decay array scalars into numpy scalars.
return value if value.shape else value[()]
value = property(to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter('si'))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale,
si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter('cgs'))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale,
cgs_unit / cgs_unit.scale)
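# For example (sketch): ``(1. * u.km).si`` gives ``<Quantity 1000. m>``,
# and ``(1. * u.m).cgs`` gives ``<Quantity 100. cm>``.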
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return []
extra_members = set()
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(
equivalencies):
extra_members.update(equivalent.names)
return extra_members
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member")
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'")
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
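# For example (sketch): ``(1. * u.m) == (100. * u.cm)`` gives ``True``,
# while a comparison across incompatible units, e.g.
# ``(1. * u.m) == (1. * u.s)``, gives ``False`` rather than raising.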
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
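# For example (sketch): ``(1. * u.m) << u.cm`` gives ``<Quantity 100. cm>``,
# while ``q <<= u.cm`` converts ``q`` in place where possible.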
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
try:
factor = self.unit._to(other)
except Exception:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] *= factor
self._set_unit(other)
return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
""" Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
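# For example (sketch): ``(2. * u.m) * u.s`` gives a quantity with value 2.0
# and a unit equivalent to ``u.m * u.s``; multiplication by plain numbers or
# arrays goes through the ufunc machinery instead.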
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
""" Right Multiplication between `Quantity` objects and other
objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
""" Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
""" Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1. / self.value, other / self.unit)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other),
self.unit ** other)
return super().__pow__(other)
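# For example (sketch): ``(4. * u.m**2) ** Fraction(1, 2)`` gives
# ``<Quantity 2. m>``, avoiding the object array a fractional power
# would otherwise produce.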
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value is not iterable"
.format(cls=self.__class__.__name__))
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(self.view(np.ndarray)[key], self.unit[key])
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value does not support "
"indexing".format(cls=self.__class__.__name__))
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing will cause a different unit, so by doing this in
# two steps we effectively try with the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and 'info' in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn('The truth value of a Quantity is ambiguous. '
'In the future this will raise a ValueError.',
AstropyDeprecationWarning)
return True
def __len__(self):
if self.isscalar:
raise TypeError("'{cls}' object with a scalar value has no "
"len()".format(cls=self.__class__.__name__))
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError('only integer dimensionless scalar quantities '
'can be converted to a Python index')
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string that uses
negative exponents instead of fractions
subfmt : str, optional
Subformat of the result. For the moment, only used for
``format='latex'`` and ``format='latex_inline'``. Supported
values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
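Examples
--------
A minimal sketch (output depends on numpy print options):
>>> import astropy.units as u
>>> q = 1.2345 * u.m
>>> q.to_string(precision=2)
'1.23 m'
>>> q.to_string(format='latex')
'$1.2345 \\; \\mathrm{m}$'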
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
formats['latex_inline'] = formats['latex']
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f'{self.value}{self._unitstr:s}'
else:
# np.array2string properly formats arrays as well as scalars
return np.array2string(self.value, precision=precision,
floatmode="fixed") + self._unitstr
# else, for the moment we assume format="latex" or "latex_inline".
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value,
format_spec=format_spec)
def complex_formatter(value):
return '({}{}i)'.format(
Latex.format_exponential_notation(value.real,
format_spec=format_spec),
Latex.format_exponential_notation(value.imag,
format_spec='+' + format_spec))
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(conf.latex_array_threshold
if conf.latex_array_threshold > -1 else pops['threshold']),
formatter={'float_kind': float_formatter,
'complex_kind': complex_formatter},
max_line_width=np.inf,
separator=',~')
latex_value = latex_value.replace('...', r'\dots')
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
if self.unit is None:
latex_unit = _UNIT_NOT_INITIALISED
elif format == 'latex':
latex_unit = self.unit._repr_latex_()[1:-1] # note this is unicode
elif format == 'latex_inline':
latex_unit = self.unit.to_string(format='latex_inline')[1:-1]
delimiter_left, delimiter_right = formats[format][subfmt]
return rf'{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}'
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
arrstr = np.array2string(self.view(np.ndarray), separator=', ',
prefix=prefixstr)
return f'{prefixstr}{arrstr}{self._unitstr:s}>'
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format='latex', subfmt='inline')
def __format__(self, format_spec):
"""
Format quantities using the new-style python formatting codes
as specifiers for the number.
If the format specifier correctly applies itself to the value,
then it is used to format only the value. If it cannot be
applied to the value, then it is applied to the whole string.
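For example (illustrative): with ``q = 1.2345 * u.m``, ``f"{q:.2f}"``
gives ``'1.23 m'``, since the ``.2f`` specifier applies to the value only.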
"""
try:
value = format(self.value, format_spec)
full_format_spec = "s"
except ValueError:
value = self.value
full_format_spec = format_spec
return format(f"{value}{self._unitstr:s}",
full_format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
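Examples
--------
A minimal sketch (assuming ``import astropy.units as u``):
>>> (10. * u.km / u.hr).decompose()
<Quantity 2.77777778 m / s>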
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, 'scale'):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError("cannot make a list of Quantities. Get "
"list of values with q.value.tolist()")
def _to_own_unit(self, value, check_precision=True, *, unit=None):
"""Convert value to one's own unit (or that given).
Here, non-quantities are treated as dimensionless, and care is taken
for values of 0, infinity or nan, which are allowed to have any unit.
Parameters
----------
value : anything convertible to `~astropy.units.Quantity`
The value to be converted to the requested unit.
check_precision : bool
Whether to forbid conversion of float to integer if that changes
the input number. Default: `True`.
unit : `~astropy.units.Unit` or None
The unit to convert to. By default, the unit of ``self``.
Returns
-------
value : number or `~numpy.ndarray`
In the requested units.
"""
if unit is None:
unit = self.unit
try:
_value = value.to_value(unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if (value is np.ma.masked
or (value is np.ma.masked_print_option
and self.dtype.kind == 'O')):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if (not hasattr(value, 'unit') and
can_have_arbitrary_unit(as_quantity.value)):
_value = as_quantity.value
else:
raise
if self.dtype.kind == 'i' and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all(np.logical_or(self_dtype_array == _value,
np.isnan(_value))):
raise TypeError("cannot convert value type to array type "
"without precision loss")
# Setting names to ensure things like equality work (note that
# above will have failed already if units did not match).
if self.dtype.names:
_value.dtype.names = self.dtype.names
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
def tostring(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tostring(...).")
def tobytes(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tobytes(...).")
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("cannot write Quantities to file. Write "
"array with q.value.tofile(...)")
def dump(self, file):
raise NotImplementedError("cannot dump Quantities to file. Write "
"array with q.value.dump()")
def dumps(self):
raise NotImplementedError("cannot dump Quantities to string. Write "
"array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode='raise'):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode='raise'):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode='raise'):
raise NotImplementedError("cannot choose based on quantity. Choose "
"using array with q.value.choose(...)")
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind='quicksort', order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(np.array(self),
self._to_own_unit(v, check_precision=False),
*args, **kwargs) # avoid numpy 1.6 problem
if NUMPY_LT_1_22:
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
else:
def argmax(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmax(axis=axis, out=out, keepdims=keepdims)
def argmin(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmin(axis=axis, out=out, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result: `~astropy.units.Quantity`, `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
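Examples
--------
A sketch of the resulting behaviour (assuming ``import astropy.units as u``):
>>> np.concatenate([[1., 2.] * u.m, [300.] * u.cm])
<Quantity [1., 2., 3.] m>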
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn("function '{}' is not known to astropy's Quantity. "
"Will run it anyway, hoping it will treat ndarray "
"subclasses correctly. Please raise an issue at "
"https://github.com/astropy/astropy/issues. "
.format(function.__name__), AstropyWarning)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Quantity)
for t in types):
raise TypeError("the Quantity implementation cannot handle {} "
"with the given arguments."
.format(function)) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
else arg) for arg in args)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs['out'] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
out=out)
if NUMPY_LT_1_20:
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, keepdims=keepdims,
unit=self.unit**2)
else:
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, keepdims=keepdims, where=where,
unit=self.unit**2)
if NUMPY_LT_1_20:
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof,
keepdims=keepdims)
else:
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof,
keepdims=keepdims, where=where)
if NUMPY_LT_1_20:
def mean(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.mean, axis, dtype, out=out,
keepdims=keepdims)
else:
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return self._wrap_function(np.mean, axis, dtype, out=out,
keepdims=keepdims, where=where)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
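# For example (sketch): ``([1., 2.] * u.m).dot([3., 4.] * u.s)`` gives
# ``<Quantity 11. m s>``.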
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)")
def any(self, axis=None, out=None):
raise TypeError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
if NUMPY_LT_1_22:
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims)
else:
# TODO: deprecate this method? It is not on ndarray, and we do not
# support nanmean, etc., so why this one?
def nansum(self, axis=None, out=None, keepdims=False, *, initial=None, where=True):
if initial is not None:
initial = self._to_own_unit(initial)
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims, initial=initial, where=where)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
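Examples
--------
A minimal sketch of such a subclass (assuming ``import astropy.units as u``):
>>> class Length(SpecificTypeQuantity):
...     _equivalent_unit = u.m
>>> Length(5., u.km)
<Length 5. km>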
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'"
.format(type(self).__name__, self._equivalent_unit) +
(", but no unit was given." if unit is None else
f", so cannot set it to '{unit}'."))
super()._set_unit(unit)
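# A minimal sketch (not part of astropy) of how SpecificTypeQuantity is meant
# to be subclassed; the ``Length`` class below is a hypothetical example, with
# astropy's real instances being Angle and Distance in astropy.coordinates:
#
#     import astropy.units as u
#
#     class Length(u.SpecificTypeQuantity):
#         _equivalent_unit = u.m
#
#     Length(3., u.km)   # works: km is equivalent to the declared unit
#     Length(3., u.s)    # raises UnitTypeError: seconds are not a length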
def isclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal.  If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See also
--------
allclose
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
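# Example usage (illustrative, not a doctest from the source): units are
# converted before the comparison, so
#
#     >>> import astropy.units as u
#     >>> u.isclose(100 * u.cm, 1 * u.m)
#     True
#
# while comparing incompatible units (say, metres against seconds) raises
# UnitsError.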
def allclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal.  If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See also
--------
isclose
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
8ec551f4e1e52934b30c32914e089688040da23fff56c16e346a66e7b44f96ab | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the SI units. They are also available in the
`astropy.units` namespace.
"""
import numpy as _numpy
from astropy.constants import si as _si
from .core import Unit, UnitBase, def_unit
_ns = globals()
###########################################################################
# DIMENSIONLESS
def_unit(['percent', 'pct'], Unit(0.01), namespace=_ns, prefixes=False,
doc="percent: one hundredth of unity, factor 0.01",
format={'generic': '%', 'console': '%', 'cds': '%',
'latex': r'\%', 'unicode': '%'})
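# Illustrative usage (not part of this module): once registered through
# ``def_unit``, the names above behave like any other unit, e.g.
#
#     >>> import astropy.units as u
#     >>> (25 * u.percent).to(u.dimensionless_unscaled)
#     <Quantity 0.25>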
###########################################################################
# LENGTH
def_unit(['m', 'meter'], namespace=_ns, prefixes=True,
doc="meter: base unit of length in SI")
def_unit(['micron'], um, namespace=_ns,
doc="micron: alias for micrometer (um)",
format={'latex': r'\mu m', 'unicode': '\N{MICRO SIGN}m'})
def_unit(['Angstrom', 'AA', 'angstrom'], 0.1 * nm, namespace=_ns,
doc="ångström: 10 ** -10 m",
prefixes=[(['m', 'milli'], ['milli', 'm'], 1.e-3)],
format={'latex': r'\mathring{A}', 'unicode': 'Å',
'vounit': 'Angstrom'})
###########################################################################
# VOLUMES
def_unit((['l', 'L'], ['liter']), 1000 * cm ** 3.0, namespace=_ns, prefixes=True,
format={'latex': r'\mathcal{l}', 'unicode': 'ℓ'},
doc="liter: metric unit of volume")
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(['rad', 'radian'], namespace=_ns, prefixes=True,
doc="radian: angular measurement of the ratio between the length "
"on an arc and its radius")
def_unit(['deg', 'degree'], _numpy.pi / 180.0 * rad, namespace=_ns,
prefixes=True,
doc="degree: angular measurement 1/360 of full rotation",
format={'latex': r'{}^{\circ}', 'unicode': '°'})
def_unit(['hourangle'], 15.0 * deg, namespace=_ns, prefixes=False,
doc="hour angle: angular measurement with 24 in a full circle",
format={'latex': r'{}^{h}', 'unicode': 'ʰ'})
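# Quick consistency check (an illustrative sketch, not a doctest from the
# source): 24 hourangle spans the full circle,
#
#     >>> import astropy.units as u
#     >>> (24 * u.hourangle).to(u.deg)
#     <Quantity 360. deg>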
def_unit(['arcmin', 'arcminute'], 1.0 / 60.0 * deg, namespace=_ns,
prefixes=True,
doc="arc minute: angular measurement",
format={'latex': r'{}^{\prime}', 'unicode': '′'})
def_unit(['arcsec', 'arcsecond'], 1.0 / 3600.0 * deg, namespace=_ns,
prefixes=True,
doc="arc second: angular measurement")
# These special formats should only be used for the non-prefix versions
arcsec._format = {'latex': r'{}^{\prime\prime}', 'unicode': '″'}
def_unit(['mas'], 0.001 * arcsec, namespace=_ns,
doc="milli arc second: angular measurement")
def_unit(['uas'], 0.000001 * arcsec, namespace=_ns,
doc="micro arc second: angular measurement",
format={'latex': r'\mu as', 'unicode': 'μas'})
def_unit(['sr', 'steradian'], rad ** 2, namespace=_ns, prefixes=True,
doc="steradian: unit of solid angle in SI")
###########################################################################
# TIME
def_unit(['s', 'second'], namespace=_ns, prefixes=True,
exclude_prefixes=['a'],
doc="second: base unit of time in SI.")
def_unit(['min', 'minute'], 60 * s, prefixes=True, namespace=_ns)
def_unit(['h', 'hour', 'hr'], 3600 * s, namespace=_ns, prefixes=True,
exclude_prefixes=['p'])
def_unit(['d', 'day'], 24 * h, namespace=_ns, prefixes=True,
exclude_prefixes=['c', 'y'])
def_unit(['sday'], 86164.09053 * s, namespace=_ns,
doc="Sidereal day (sday) is the time of one rotation of the Earth.")
def_unit(['wk', 'week'], 7 * day, namespace=_ns)
def_unit(['fortnight'], 2 * wk, namespace=_ns)
def_unit(['a', 'annum'], 365.25 * d, namespace=_ns, prefixes=True,
exclude_prefixes=['P'])
def_unit(['yr', 'year'], 365.25 * d, namespace=_ns, prefixes=True)
###########################################################################
# FREQUENCY
def_unit(['Hz', 'Hertz', 'hertz'], 1 / s, namespace=_ns, prefixes=True,
doc="Frequency")
###########################################################################
# MASS
def_unit(['kg', 'kilogram'], namespace=_ns,
doc="kilogram: base unit of mass in SI.")
def_unit(['g', 'gram'], 1.0e-3 * kg, namespace=_ns, prefixes=True,
exclude_prefixes=['k', 'kilo'])
def_unit(['t', 'tonne'], 1000 * kg, namespace=_ns,
doc="Metric tonne")
###########################################################################
# AMOUNT OF SUBSTANCE
def_unit(['mol', 'mole'], namespace=_ns, prefixes=True,
doc="mole: amount of a chemical substance in SI.")
###########################################################################
# TEMPERATURE
def_unit(
['K', 'Kelvin'], namespace=_ns, prefixes=True,
doc="Kelvin: temperature with a null point at absolute zero.")
def_unit(
['deg_C', 'Celsius'], namespace=_ns, doc='Degrees Celsius',
format={'latex': r'{}^{\circ}C', 'unicode': '°C'})
###########################################################################
# FORCE
def_unit(['N', 'Newton', 'newton'], kg * m * s ** -2, namespace=_ns,
prefixes=True, doc="Newton: force")
##########################################################################
# ENERGY
def_unit(['J', 'Joule', 'joule'], N * m, namespace=_ns, prefixes=True,
doc="Joule: energy")
def_unit(['eV', 'electronvolt'], _si.e.value * J, namespace=_ns, prefixes=True,
doc="Electron Volt")
##########################################################################
# PRESSURE
def_unit(['Pa', 'Pascal', 'pascal'], J * m ** -3, namespace=_ns, prefixes=True,
doc="Pascal: pressure")
###########################################################################
# POWER
def_unit(['W', 'Watt', 'watt'], J / s, namespace=_ns, prefixes=True,
doc="Watt: power")
###########################################################################
# ELECTRICAL
def_unit(['A', 'ampere', 'amp'], namespace=_ns, prefixes=True,
doc="ampere: base unit of electric current in SI")
def_unit(['C', 'coulomb'], A * s, namespace=_ns, prefixes=True,
doc="coulomb: electric charge")
def_unit(['V', 'Volt', 'volt'], J * C ** -1, namespace=_ns, prefixes=True,
doc="Volt: electric potential or electromotive force")
def_unit((['Ohm', 'ohm'], ['Ohm']), V * A ** -1, namespace=_ns, prefixes=True,
doc="Ohm: electrical resistance",
format={'latex': r'\Omega', 'unicode': 'Ω'})
def_unit(['S', 'Siemens', 'siemens'], A * V ** -1, namespace=_ns,
prefixes=True, doc="Siemens: electrical conductance")
def_unit(['F', 'Farad', 'farad'], C * V ** -1, namespace=_ns, prefixes=True,
doc="Farad: electrical capacitance")
###########################################################################
# MAGNETIC
def_unit(['Wb', 'Weber', 'weber'], V * s, namespace=_ns, prefixes=True,
doc="Weber: magnetic flux")
def_unit(['T', 'Tesla', 'tesla'], Wb * m ** -2, namespace=_ns, prefixes=True,
doc="Tesla: magnetic flux density")
def_unit(['H', 'Henry', 'henry'], Wb * A ** -1, namespace=_ns, prefixes=True,
doc="Henry: inductance")
###########################################################################
# ILLUMINATION
def_unit(['cd', 'candela'], namespace=_ns, prefixes=True,
doc="candela: base unit of luminous intensity in SI")
def_unit(['lm', 'lumen'], cd * sr, namespace=_ns, prefixes=True,
doc="lumen: luminous flux")
def_unit(['lx', 'lux'], lm * m ** -2, namespace=_ns, prefixes=True,
doc="lux: luminous emittance")
###########################################################################
# RADIOACTIVITY
def_unit(['Bq', 'becquerel'], 1 / s, namespace=_ns, prefixes=False,
doc="becquerel: unit of radioactivity")
def_unit(['Ci', 'curie'], Bq * 3.7e10, namespace=_ns, prefixes=False,
doc="curie: unit of radioactivity")
###########################################################################
# BASES
bases = {m, s, kg, A, cd, rad, K, mol}
###########################################################################
# CLEANUP
del UnitBase
del Unit
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
3f4b96e722bda106e67724afb599fbacc35118585a560402abaf3143f1922cd8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines miscellaneous units. They are also
available in the `astropy.units` namespace.
"""
from astropy.constants import si as _si
from . import si
from .core import UnitBase, binary_prefixes, def_unit, set_enabled_units, si_prefixes
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# AREAS
def_unit(['barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True,
doc="barn: unit of area used in HEP")
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad,
namespace=_ns, prefixes=False,
doc="cycle: angular measurement, a full turn or rotation")
def_unit(['spat', 'sp'], 4.0 * _numpy.pi * si.sr,
namespace=_ns, prefixes=False,
doc="spat: the solid angle of the sphere, 4pi sr")
##########################################################################
# PRESSURE
def_unit(['bar'], 1e5 * si.Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="bar: pressure")
# The torr is almost the same as mmHg but not quite.
# See https://en.wikipedia.org/wiki/Torr
# Define the unit here despite it not being an astrophysical unit.
# It may be moved if more similar units are created later.
def_unit(['Torr', 'torr'], _si.atm.value/760. * si.Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="Unit of pressure based on an absolute scale, now defined as "
"exactly 1/760 of a standard atmosphere")
###########################################################################
# MASS
def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass",
format={'latex': r'M_{p}', 'unicode': 'Mₚ'})
def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass",
format={'latex': r'M_{e}', 'unicode': 'Mₑ'})
# Unified atomic mass unit
def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns,
prefixes=True, exclude_prefixes=['a', 'da'],
doc="Unified atomic mass unit")
###########################################################################
# COMPUTER
def_unit((['bit', 'b'], ['bit']), namespace=_ns,
prefixes=si_prefixes + binary_prefixes)
def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns,
format={'vounit': 'byte'},
prefixes=si_prefixes + binary_prefixes,
exclude_prefixes=['d'])
def_unit((['pix', 'pixel'], ['pixel']),
format={'ogip': 'pixel', 'vounit': 'pixel'},
namespace=_ns, prefixes=True)
def_unit((['vox', 'voxel'], ['voxel']),
format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'},
namespace=_ns, prefixes=True)
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
c8e0cc4a421b2a8c09f82ed6547a7d0525a5f0cb052bd9d87c9665f08c575621 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
This module contains formatting functions that are for internal use in
astropy.coordinates.angles. Mainly they are conversions from one format
of data to another.
"""
import os
import threading
from warnings import warn
import numpy as np
from .errors import (IllegalHourWarning, IllegalHourError,
IllegalMinuteWarning, IllegalMinuteError,
IllegalSecondWarning, IllegalSecondError)
from astropy.utils import format_exception, parsing
from astropy.utils.decorators import deprecated
from astropy import units as u
class _AngleParser:
"""
Parses the various angle formats including:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
* 1°2′3″N
This class should not be used directly. Use `parse_angle`
instead.
"""
# For safe multi-threaded operation all class (but not instance)
# members that carry state should be thread-local. They are stored
# in the following class member
_thread_local = threading.local()
def __init__(self):
# TODO: in principle, the parser should be invalidated if we change unit
# system (from CDS to FITS, say). Might want to keep a link to the
# unit_registry used, and regenerate the parser/lexer if it changes.
# Alternatively, perhaps one should not worry at all and just pre-
# generate the parser for each release (as done for unit formats).
# For some discussion of this problem, see
# https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
if '_parser' not in _AngleParser._thread_local.__dict__:
(_AngleParser._thread_local._parser,
_AngleParser._thread_local._lexer) = self._make_parser()
@classmethod
def _get_simple_unit_names(cls):
simple_units = set(
u.radian.find_equivalent_units(include_prefix_units=True))
simple_unit_names = set()
# We filter out degree and hourangle, since those are treated
# separately.
for unit in simple_units:
if unit != u.deg and unit != u.hourangle:
simple_unit_names.update(unit.names)
return sorted(simple_unit_names)
@classmethod
def _make_parser(cls):
from astropy.extern.ply import lex, yacc
# List of token names.
tokens = (
'SIGN',
'UINT',
'UFLOAT',
'COLON',
'DEGREE',
'HOUR',
'MINUTE',
'SECOND',
'SIMPLE_UNIT',
'EASTWEST',
'NORTHSOUTH'
)
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
            r'((\d+\.\d*)|(\.\d+))([eE][+−-]?\d+)?'
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
t.value = float(t.value.replace('−', '-'))
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+−-]'
            # The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
if t.value == '+':
t.value = 1.0
else:
t.value = -1.0
return t
def t_EASTWEST(t):
r'[EW]$'
t.value = -1.0 if t.value == 'W' else 1.0
return t
def t_NORTHSOUTH(t):
r'[NS]$'
# We cannot use lower-case letters otherwise we'll confuse
# s[outh] with s[econd]
t.value = -1.0 if t.value == 'S' else 1.0
return t
def t_SIMPLE_UNIT(t):
t.value = u.Unit(t.value)
return t
t_SIMPLE_UNIT.__doc__ = '|'.join(
f'(?:{x})' for x in cls._get_simple_unit_names())
t_COLON = ':'
t_DEGREE = r'd(eg(ree(s)?)?)?|°'
t_HOUR = r'hour(s)?|h(r)?|ʰ'
t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ'
t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ'
# A string containing ignored characters (spaces)
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
f"Invalid character at col {t.lexpos}")
lexer = parsing.lex(lextab='angle_lextab', package='astropy/coordinates')
def p_angle(p):
'''
angle : sign hms eastwest
| sign dms dir
| sign arcsecond dir
| sign arcminute dir
| sign simple dir
'''
sign = p[1] * p[3]
value, unit = p[2]
if isinstance(value, tuple):
p[0] = ((sign * value[0],) + value[1:], unit)
else:
p[0] = (sign * value, unit)
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_eastwest(p):
'''
eastwest : EASTWEST
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_dir(p):
'''
dir : EASTWEST
| NORTHSOUTH
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_ufloat(p):
'''
ufloat : UFLOAT
| UINT
'''
p[0] = p[1]
def p_colon(p):
'''
colon : UINT COLON ufloat
| UINT COLON UINT COLON ufloat
'''
if len(p) == 4:
p[0] = (p[1], p[3])
elif len(p) == 6:
p[0] = (p[1], p[3], p[5])
def p_spaced(p):
'''
spaced : UINT ufloat
| UINT UINT ufloat
'''
if len(p) == 3:
p[0] = (p[1], p[2])
elif len(p) == 4:
p[0] = (p[1], p[2], p[3])
def p_generic(p):
'''
generic : colon
| spaced
| ufloat
'''
p[0] = p[1]
def p_hms(p):
'''
hms : UINT HOUR
| UINT HOUR ufloat
| UINT HOUR UINT MINUTE
| UINT HOUR UFLOAT MINUTE
| UINT HOUR UINT MINUTE ufloat
| UINT HOUR UINT MINUTE ufloat SECOND
| generic HOUR
'''
if len(p) == 3:
p[0] = (p[1], u.hourangle)
elif len(p) in (4, 5):
p[0] = ((p[1], p[3]), u.hourangle)
elif len(p) in (6, 7):
p[0] = ((p[1], p[3], p[5]), u.hourangle)
def p_dms(p):
'''
dms : UINT DEGREE
| UINT DEGREE ufloat
| UINT DEGREE UINT MINUTE
| UINT DEGREE UFLOAT MINUTE
| UINT DEGREE UINT MINUTE ufloat
| UINT DEGREE UINT MINUTE ufloat SECOND
| generic DEGREE
'''
if len(p) == 3:
p[0] = (p[1], u.degree)
elif len(p) in (4, 5):
p[0] = ((p[1], p[3]), u.degree)
elif len(p) in (6, 7):
p[0] = ((p[1], p[3], p[5]), u.degree)
def p_simple(p):
'''
simple : generic
| generic SIMPLE_UNIT
'''
if len(p) == 2:
p[0] = (p[1], None)
else:
p[0] = (p[1], p[2])
def p_arcsecond(p):
'''
arcsecond : generic SECOND
'''
p[0] = (p[1], u.arcsecond)
def p_arcminute(p):
'''
arcminute : generic MINUTE
'''
p[0] = (p[1], u.arcminute)
def p_error(p):
raise ValueError
parser = parsing.yacc(tabmodule='angle_parsetab', package='astropy/coordinates')
return parser, lexer
def parse(self, angle, unit, debug=False):
try:
found_angle, found_unit = self._thread_local._parser.parse(
angle, lexer=self._thread_local._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError(f"{str(e)} in angle {angle!r}") from e
else:
raise ValueError(
f"Syntax error parsing angle {angle!r}") from e
if unit is None and found_unit is None:
raise u.UnitsError("No unit specified")
return found_angle, found_unit
def _check_hour_range(hrs):
"""
Checks that the given value is in the range (-24, 24).
"""
if np.any(np.abs(hrs) == 24.):
warn(IllegalHourWarning(hrs, 'Treating as 24 hr'))
elif np.any(hrs < -24.) or np.any(hrs > 24.):
raise IllegalHourError(hrs)
def _check_minute_range(m):
"""
Checks that the given value is in the range [0,60]. If the value
is equal to 60, then a warning is raised.
"""
if np.any(m == 60.):
warn(IllegalMinuteWarning(m, 'Treating as 0 min, +1 hr/deg'))
elif np.any(m < -60.) or np.any(m > 60.):
# "Error: minutes not in range [-60,60) ({0}).".format(min))
raise IllegalMinuteError(m)
def _check_second_range(sec):
"""
Checks that the given value is in the range [0,60]. If the value
is equal to 60, then a warning is raised.
"""
if np.any(sec == 60.):
warn(IllegalSecondWarning(sec, 'Treating as 0 sec, +1 min'))
elif sec is None:
pass
elif np.any(sec < -60.) or np.any(sec > 60.):
# "Error: seconds not in range [-60,60) ({0}).".format(sec))
raise IllegalSecondError(sec)
def check_hms_ranges(h, m, s):
"""
Checks that the given hour, minute and second are all within
reasonable range.
"""
_check_hour_range(h)
_check_minute_range(m)
_check_second_range(s)
return None
def parse_angle(angle, unit=None, debug=False):
"""
Parses an input string value into an angle value.
Parameters
----------
angle : str
A string representing the angle. May be in one of the following forms:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
unit : `~astropy.units.UnitBase` instance, optional
The unit used to interpret the string. If ``unit`` is not
provided, the unit must be explicitly represented in the
string, either at the end or as number separators.
debug : bool, optional
If `True`, print debugging information from the parser.
Returns
-------
value, unit : tuple
``value`` is the value as a floating point number or three-part
tuple, and ``unit`` is a `Unit` instance which is either the
unit passed in or the one explicitly mentioned in the input
string.
"""
return _AngleParser().parse(angle, unit, debug=debug)
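# Illustrative behaviour (a sketch, not a doctest from the source): the value
# returned is either a float or an (h-or-d, m, s) tuple, paired with the unit
# found in the string; e.g. parse_angle('1d2m3.4s') yields roughly
# ((1.0, 2, 3.4), u.degree).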
def degrees_to_dms(d):
"""
Convert a floating-point degree value into a ``(degree, arcminute,
arcsecond)`` tuple.
"""
sign = np.copysign(1.0, d)
(df, d) = np.modf(np.abs(d)) # (degree fraction, degree)
(mf, m) = np.modf(df * 60.) # (minute fraction, minute)
s = mf * 60.
return np.floor(sign * d), sign * np.floor(m), sign * s
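# Worked example (illustrative): degrees_to_dms(10.2345) gives approximately
# (10.0, 14.0, 4.2), since 0.2345 deg = 14.07 arcmin = 14 arcmin + 4.2 arcsec.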
@deprecated("dms_to_degrees (or creating an Angle with a tuple) has ambiguous "
"behavior when the degree value is 0",
alternative="another way of creating angles instead (e.g. a less "
"ambiguous string like '-0d1m2.3s'")
def dms_to_degrees(d, m, s=None):
"""
Convert degrees, arcminute, arcsecond to a float degrees value.
"""
_check_minute_range(m)
_check_second_range(s)
# determine sign
sign = np.copysign(1.0, d)
try:
d = np.floor(np.abs(d))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError as err:
raise ValueError(format_exception(
"{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.", d, m, s)) from err
return sign * (d + m / 60. + s / 3600.)
@deprecated("hms_to_hours (or creating an Angle with a tuple) has ambiguous "
"behavior when the hour value is 0",
alternative="another way of creating angles instead (e.g. a less "
"ambiguous string like '-0h1m2.3s'")
def hms_to_hours(h, m, s=None):
"""
Convert hour, minute, second to a float hour value.
"""
check_hms_ranges(h, m, s)
# determine sign
sign = np.copysign(1.0, h)
try:
h = np.floor(np.abs(h))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError as err:
raise ValueError(format_exception(
"{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.", h, m, s)) from err
return sign * (h + m / 60. + s / 3600.)
def hms_to_degrees(h, m, s):
"""
Convert hour, minute, second to a float degrees value.
"""
return hms_to_hours(h, m, s) * 15.
def hms_to_radians(h, m, s):
"""
Convert hour, minute, second to a float radians value.
"""
return u.degree.to(u.radian, hms_to_degrees(h, m, s))
def hms_to_dms(h, m, s):
"""
    Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
    tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s))
def hours_to_decimal(h):
"""
Convert any parseable hour value into a float value.
"""
from . import angles
return angles.Angle(h, unit=u.hourangle).hour
def hours_to_radians(h):
"""
Convert an angle in Hours to Radians.
"""
return u.hourangle.to(u.radian, h)
def hours_to_hms(h):
"""
    Convert a floating-point hour value into an ``(hour, minute,
second)`` tuple.
"""
sign = np.copysign(1.0, h)
    (hf, h) = np.modf(np.abs(h))  # (hour fraction, hour)
    (mf, m) = np.modf(hf * 60.0)  # (minute fraction, minute)
s = mf * 60.0
return (np.floor(sign * h), sign * np.floor(m), sign * s)
def radians_to_degrees(r):
"""
Convert an angle in Radians to Degrees.
"""
return u.radian.to(u.degree, r)
def radians_to_hours(r):
"""
Convert an angle in Radians to Hours.
"""
return u.radian.to(u.hourangle, r)
def radians_to_hms(r):
"""
Convert an angle in Radians to an ``(hour, minute, second)`` tuple.
"""
hours = radians_to_hours(r)
return hours_to_hms(hours)
def radians_to_dms(r):
"""
    Convert an angle in Radians to a ``(degree, arcminute,
arcsecond)`` tuple.
"""
degrees = u.radian.to(u.degree, r)
return degrees_to_dms(degrees)
def sexagesimal_to_string(values, precision=None, pad=False, sep=(':',),
fields=3):
"""
Given an already separated tuple of sexagesimal values, returns
a string.
See `hours_to_string` and `degrees_to_string` for a higher-level
interface to this functionality.
"""
# Check to see if values[0] is negative, using np.copysign to handle -0
sign = np.copysign(1.0, values[0])
# If the coordinates are negative, we need to take the absolute values.
# We use np.abs because abs(-0) is -0
# TODO: Is this true? (MHvK, 2018-02-01: not on my system)
values = [np.abs(value) for value in values]
if pad:
if sign == -1:
pad = 3
else:
pad = 2
else:
pad = 0
if not isinstance(sep, tuple):
sep = tuple(sep)
if fields < 1 or fields > 3:
raise ValueError(
"fields must be 1, 2, or 3")
if not sep: # empty string, False, or None, etc.
sep = ('', '', '')
elif len(sep) == 1:
if fields == 3:
sep = sep + (sep[0], '')
elif fields == 2:
sep = sep + ('', '')
else:
sep = ('', '', '')
elif len(sep) == 2:
sep = sep + ('',)
elif len(sep) != 3:
raise ValueError(
"Invalid separator specification for converting angle to string.")
# Simplify the expression based on the requested precision. For
# example, if the seconds will round up to 60, we should convert
# it to 0 and carry upwards. If the field is hidden (by the
# fields kwarg) we round up around the middle, 30.0.
if precision is None:
rounding_thresh = 60.0 - (10.0 ** -8)
else:
rounding_thresh = 60.0 - (10.0 ** -precision)
if fields == 3 and values[2] >= rounding_thresh:
values[2] = 0.0
values[1] += 1.0
elif fields < 3 and values[2] >= 30.0:
values[1] += 1.0
if fields >= 2 and values[1] >= 60.0:
values[1] = 0.0
values[0] += 1.0
elif fields < 2 and values[1] >= 30.0:
values[0] += 1.0
literal = []
last_value = ''
literal.append('{0:0{pad}.0f}{sep[0]}')
if fields >= 2:
literal.append('{1:02d}{sep[1]}')
if fields == 3:
if precision is None:
last_value = f'{abs(values[2]):.8f}'
last_value = last_value.rstrip('0').rstrip('.')
else:
last_value = '{0:.{precision}f}'.format(
abs(values[2]), precision=precision)
if len(last_value) == 1 or last_value[1] == '.':
last_value = '0' + last_value
literal.append('{last_value}{sep[2]}')
literal = ''.join(literal)
return literal.format(np.copysign(values[0], sign),
int(values[1]), values[2],
sep=sep, pad=pad,
last_value=last_value)
def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's'),
fields=3):
"""
Takes a decimal hour value and returns a string formatted as hms with
separator specified by the 'sep' parameter.
``h`` must be a scalar.
"""
h, m, s = hours_to_hms(h)
return sexagesimal_to_string((h, m, s), precision=precision, pad=pad,
sep=sep, fields=fields)
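# Illustrative output (a sketch, not a doctest from the source): with the
# defaults, hours_to_string(12.34567) gives roughly '12h20m44.41200s'
# (0.34567 hr = 20.7402 min; 0.7402 min = 44.412 s).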
def degrees_to_string(d, precision=5, pad=False, sep=':', fields=3):
"""
    Takes a decimal degree value and returns a string formatted as dms with
separator specified by the 'sep' parameter.
``d`` must be a scalar.
"""
d, m, s = degrees_to_dms(d)
return sexagesimal_to_string((d, m, s), precision=precision, pad=pad,
sep=sep, fields=fields)
|
2af6d0b9895280b6f0c273903b8daf794be046081600f9d24387c846025194d3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
''' This module defines custom errors and exceptions used in astropy.coordinates.
'''
from astropy.utils.exceptions import AstropyWarning
__all__ = ['RangeError', 'BoundsError', 'IllegalHourError',
'IllegalMinuteError', 'IllegalSecondError', 'ConvertError',
'IllegalHourWarning', 'IllegalMinuteWarning', 'IllegalSecondWarning',
'UnknownSiteException']
class RangeError(ValueError):
"""
Raised when some part of an angle is out of its valid range.
"""
class BoundsError(RangeError):
"""
Raised when an angle is outside of its user-specified bounds.
"""
class IllegalHourError(RangeError):
"""
Raised when an hour value is not in the range [0,24).
Parameters
----------
hour : int, float
Examples
--------
.. code-block:: python
if not 0 <= hr < 24:
raise IllegalHourError(hour)
"""
def __init__(self, hour):
self.hour = hour
def __str__(self):
return f"An invalid value for 'hours' was found ('{self.hour}'); must be in the range [0,24)."
class IllegalHourWarning(AstropyWarning):
"""
Raised when an hour value is 24.
Parameters
----------
hour : int, float
"""
def __init__(self, hour, alternativeactionstr=None):
self.hour = hour
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = f"'hour' was found to be '{self.hour}', which is not in range (-24, 24)."
if self.alternativeactionstr is not None:
message += ' ' + self.alternativeactionstr
return message
class IllegalMinuteError(RangeError):
"""
    Raised when a minute value is not in the range [0,60].
Parameters
----------
minute : int, float
Examples
--------
.. code-block:: python
if not 0 <= min < 60:
raise IllegalMinuteError(minute)
"""
def __init__(self, minute):
self.minute = minute
def __str__(self):
return f"An invalid value for 'minute' was found ('{self.minute}'); should be in the range [0,60)."
class IllegalMinuteWarning(AstropyWarning):
"""
Raised when a minute value is 60.
Parameters
----------
minute : int, float
"""
def __init__(self, minute, alternativeactionstr=None):
self.minute = minute
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = f"'minute' was found to be '{self.minute}', which is not in range [0,60)."
if self.alternativeactionstr is not None:
message += ' ' + self.alternativeactionstr
return message
class IllegalSecondError(RangeError):
"""
    Raised when a second value (time) is not in the range [0,60].
Parameters
----------
second : int, float
Examples
--------
.. code-block:: python
if not 0 <= sec < 60:
raise IllegalSecondError(second)
"""
def __init__(self, second):
self.second = second
def __str__(self):
return f"An invalid value for 'second' was found ('{self.second}'); should be in the range [0,60)."
class IllegalSecondWarning(AstropyWarning):
"""
Raised when a second value is 60.
Parameters
----------
second : int, float
"""
def __init__(self, second, alternativeactionstr=None):
self.second = second
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = f"'second' was found to be '{self.second}', which is not in range [0,60)."
if self.alternativeactionstr is not None:
message += ' ' + self.alternativeactionstr
return message
# TODO: consider if this should be moved to `units`?
class UnitsError(ValueError):
"""
Raised if units are missing or invalid.
"""
class ConvertError(Exception):
"""
Raised if a coordinate system cannot be converted to another
"""
class UnknownSiteException(KeyError):
def __init__(self, site, attribute, close_names=None):
message = f"Site '{site}' not in database. Use {attribute} to see available sites."
if close_names:
message += " Did you mean one of: '{}'?'".format("', '".join(close_names))
self.site = site
self.attribute = attribute
self.close_names = close_names
        super().__init__(message)
|
a7f1b04eb77ec9675e08daf0dfb95b0863e57bba5add3cc447addd6406662509 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities used for constructing and inspecting rotation matrices.
"""
from functools import reduce
import numpy as np
from astropy import units as u
from .angles import Angle
def matrix_product(*matrices):
"""Matrix multiply all arguments together.
Arguments should have dimension 2 or larger. Larger dimensional objects
are interpreted as stacks of matrices residing in the last two dimensions.
This function mostly exists for readability: using `~numpy.matmul`
directly, one would have ``matmul(matmul(m1, m2), m3)``, etc. For even
better readability, one might consider using `~numpy.matrix` for the
arguments (so that one could write ``m1 * m2 * m3``), but then it is not
    possible to handle stacks of matrices.  On Python >= 3.5, the same
    product can simply be written as ``m1 @ m2 @ m3``.
"""
return reduce(np.matmul, matrices)
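# A minimal sketch of the equivalence described above (illustrative only):
#
#     >>> import numpy as np
#     >>> m1 = m2 = m3 = np.eye(3)
#     >>> np.all(matrix_product(m1, m2, m3) == m1 @ m2 @ m3)
#     True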
def matrix_transpose(matrix):
"""Transpose a matrix or stack of matrices by swapping the last two axes.
This function mostly exists for readability; seeing ``.swapaxes(-2, -1)``
it is not that obvious that one does a transpose. Note that one cannot
use `~numpy.ndarray.T`, as this transposes all axes and thus does not
work for stacks of matrices.
"""
return matrix.swapaxes(-2, -1)
def rotation_matrix(angle, axis='z', unit=None):
"""
Generate matrices for rotation by some angle around some axis.
Parameters
----------
angle : angle-like
The amount of rotation the matrices should represent. Can be an array.
axis : str or array-like
Either ``'x'``, ``'y'``, ``'z'``, or a (x,y,z) specifying the axis to
rotate about. If ``'x'``, ``'y'``, or ``'z'``, the rotation sense is
counterclockwise looking down the + axis (e.g. positive rotations obey
left-hand-rule). If given as an array, the last dimension should be 3;
it will be broadcast against ``angle``.
unit : unit-like, optional
If ``angle`` does not have associated units, they are in this
unit. If neither are provided, it is assumed to be degrees.
Returns
-------
    rmat : `~numpy.ndarray`
        A unitary rotation matrix.
"""
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.radian)
else:
if unit is None:
angle = np.deg2rad(angle)
else:
angle = u.Unit(unit).to(u.rad, angle)
s = np.sin(angle)
c = np.cos(angle)
# use optimized implementations for x/y/z
try:
i = 'xyz'.index(axis)
except TypeError:
axis = np.asarray(axis)
axis = axis / np.sqrt((axis * axis).sum(axis=-1, keepdims=True))
R = (axis[..., np.newaxis] * axis[..., np.newaxis, :] *
(1. - c)[..., np.newaxis, np.newaxis])
for i in range(0, 3):
R[..., i, i] += c
a1 = (i + 1) % 3
a2 = (i + 2) % 3
R[..., a1, a2] += axis[..., i] * s
R[..., a2, a1] -= axis[..., i] * s
else:
a1 = (i + 1) % 3
a2 = (i + 2) % 3
R = np.zeros(getattr(angle, 'shape', ()) + (3, 3))
R[..., i, i] = 1.
R[..., a1, a1] = c
R[..., a1, a2] = s
R[..., a2, a1] = -s
R[..., a2, a2] = c
return R
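# Illustrative check of the rotation sense documented above (a sketch, not a
# doctest from the source): a +90 deg rotation about 'z' sends the x unit
# vector to -y,
#
#     >>> import numpy as np
#     >>> from astropy import units as u
#     >>> np.allclose(rotation_matrix(90 * u.deg, 'z') @ [1, 0, 0], [0, -1, 0])
#     True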
def angle_axis(matrix):
"""
Angle of rotation and rotation axis for a given rotation matrix.
Parameters
----------
matrix : array-like
A 3 x 3 unitary rotation matrix (or stack of matrices).
Returns
-------
angle : `~astropy.coordinates.Angle`
The angle of rotation.
axis : array
The (normalized) axis of rotation (with last dimension 3).
"""
m = np.asanyarray(matrix)
if m.shape[-2:] != (3, 3):
raise ValueError('matrix is not 3x3')
axis = np.zeros(m.shape[:-1])
axis[..., 0] = m[..., 2, 1] - m[..., 1, 2]
axis[..., 1] = m[..., 0, 2] - m[..., 2, 0]
axis[..., 2] = m[..., 1, 0] - m[..., 0, 1]
r = np.sqrt((axis * axis).sum(-1, keepdims=True))
angle = np.arctan2(r[..., 0],
m[..., 0, 0] + m[..., 1, 1] + m[..., 2, 2] - 1.)
return Angle(angle, u.radian), -axis / r
def is_O3(matrix):
"""Check whether a matrix is in the length-preserving group O(3).
Parameters
----------
matrix : (..., N, N) array-like
Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
when using `~numpy.isclose`.
Returns
-------
is_o3 : bool or array of bool
If the matrix has more than two axes, the O(3) check is performed on
slices along the last two axes -- (M, N, N) => (M, ) bool array.
Notes
-----
The orthogonal group O(3) preserves lengths, but is not guaranteed to keep
orientations. Rotations and reflections are in this group.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
"""
# matrix is in O(3) (rotations, proper and improper).
I = np.identity(matrix.shape[-1])
is_o3 = np.all(np.isclose(matrix @ matrix.swapaxes(-2, -1), I, atol=1e-15),
axis=(-2, -1))
return is_o3
def is_rotation(matrix, allow_improper=False):
"""Check whether a matrix is a rotation, proper or improper.
Parameters
----------
matrix : (..., N, N) array-like
Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
when using `~numpy.isclose` and `~numpy.linalg.det`.
allow_improper : bool, optional
Whether to restrict check to the SO(3), the group of proper rotations,
or also allow improper rotations (with determinant -1).
The default (False) is only SO(3).
Returns
-------
isrot : bool or array of bool
If the matrix has more than two axes, the checks are performed on
slices along the last two axes -- (M, N, N) => (M, ) bool array.
See Also
--------
    astropy.coordinates.matrix_utilities.is_O3 :
For the less restrictive check that a matrix is in the group O(3).
Notes
-----
The group SO(3) is the rotation group. It is O(3), with determinant 1.
Rotations with determinant -1 are improper rotations, combining both a
rotation and a reflection.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
"""
# matrix is in O(3).
is_o3 = is_O3(matrix)
# determinant checks for rotation (proper and improper)
if allow_improper: # determinant can be +/- 1
is_det1 = np.isclose(np.abs(np.linalg.det(matrix)), 1.0)
else: # restrict to SO(3)
is_det1 = np.isclose(np.linalg.det(matrix), 1.0)
return is_o3 & is_det1
|
c91e69157f2030377cd5172678a1b220493bbfd5658ed03cb08305f434db766d | import re
import copy
import warnings
import operator
import numpy as np
import erfa
from astropy.utils.compat.misc import override__dir__
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.utils.data_info import MixinInfo
from astropy.utils import ShapedLikeNDArray
from astropy.table import QTable
from astropy.time import Time
from astropy.utils.exceptions import AstropyUserWarning
from .distances import Distance
from .angles import Angle
from .baseframe import (BaseCoordinateFrame, frame_transform_graph,
GenericFrame)
from .builtin_frames import ICRS, SkyOffsetFrame
from .representation import (RadialDifferential, SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation)
from .sky_coordinate_parsers import (_get_frame_class, _get_frame_without_data,
_parse_coordinate_data)
__all__ = ['SkyCoord', 'SkyCoordInfo']
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {'unit'} # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ['{0.' + compname + '.value:}' for compname
in repr_data.components]
return ','.join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ','.join(str(getattr(repr_data, comp).unit) or 'None'
for comp in repr_data.components)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if (issubclass(sc.representation_type, SphericalRepresentation)
and isinstance(sc.data, UnitSphericalRepresentation)):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type,
in_frame_units=True)
return repr_data
def _represent_as_dict(self):
sc = self._parent
attrs = list(sc.representation_component_names)
# Don't output distance unless it's actually distance.
if isinstance(sc.data, UnitSphericalRepresentation):
attrs = attrs[:-1]
diff = sc.data.differentials.get('s')
if diff is not None:
diff_attrs = list(sc.get_representation_component_names('s'))
# Don't output proper motions if they haven't been specified.
if isinstance(diff, RadialDifferential):
diff_attrs = diff_attrs[2:]
# Don't output radial velocity unless it's actually velocity.
elif isinstance(diff, (UnitSphericalDifferential,
UnitSphericalCosLatDifferential)):
diff_attrs = diff_attrs[:-1]
attrs.extend(diff_attrs)
attrs.extend(frame_transform_graph.frame_attributes.keys())
out = super()._represent_as_dict(attrs)
out['representation_type'] = sc.representation_type.get_name()
out['frame'] = sc.frame.name
# Note that sc.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
def new_like(self, skycoords, length, metadata_conflicts='warn', name=None):
"""
Return a new SkyCoord instance which is consistent with the input
SkyCoord objects ``skycoords`` and has ``length`` rows. Being
"consistent" is defined as being able to set an item from one to each of
the rest without any exception being raised.
This is intended for creating a new SkyCoord instance whose elements can
be set in-place for table operations like join or vstack. This is used
when a SkyCoord object is used as a mixin column in an astropy Table.
The data values are not predictable and it is expected that the consumer
of the object will fill in all values.
Parameters
----------
skycoords : list
List of input SkyCoord objects
length : int
Length of the output skycoord object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output name (sets output skycoord.info.name)
Returns
-------
skycoord : SkyCoord (or subclass)
Instance of this class consistent with ``skycoords``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(skycoords, metadata_conflicts, name,
('meta', 'description'))
skycoord0 = skycoords[0]
# Make a new SkyCoord object with the desired length and attributes
# by using the _apply / __getitem__ machinery to effectively return
        # skycoord0[[0, 0, ..., 0, 0]]. This will have all the right frame
# attributes with the right shape.
indexes = np.zeros(length, dtype=np.int64)
out = skycoord0[indexes]
# Use __setitem__ machinery to check for consistency of all skycoords
for skycoord in skycoords[1:]:
try:
out[0] = skycoord[0]
except Exception as err:
                raise ValueError('Input skycoords are inconsistent.') from err
# Set (merged) info attributes
for attr in ('name', 'meta', 'description'):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The `SkyCoord` class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: https://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a `SkyCoord`
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias. The frame classes that are built in
to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
The string aliases are simply lower-case versions of the class name, and
allow for creating a `SkyCoord` object and transforming frames without
explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this `SkyCoord` should represent. Defaults to
        ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied coordinate values.
If only one unit is supplied then it applies to all values.
Note that passing only one unit might lead to unit conversion errors
if the coordinate values are expected to have mixed physical meanings
(e.g., angles and distances).
obstime : time-like, optional
Time(s) of observation.
equinox : time-like, optional
Coordinate frame equinox time.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : angle-like, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including `ICRS`,
`FK5`, `FK4`, and `FK4NoETerms`.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components, in angle per time units.
l, b : angle-like, optional
            Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the `Galactic` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components in the `Galactic` frame, in angle per time
units.
x, y, z : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values
u, v, w : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (len(args) == 1 and len(kwargs) == 0
and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError('Cannot initialize from a coordinate frame '
'instance without coordinate data')
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError('Cannot create a SkyCoord without data')
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
# TODO: remove these in future
@property
def representation(self):
return self.frame.representation
@representation.setter
def representation(self, value):
self.frame.representation = value
@property
def shape(self):
return self.frame.shape
def __eq__(self, value):
"""Equality operator for SkyCoord
This implements strict equality and requires that the frames are
equivalent, extra frame attributes are equivalent, and that the
representation data are exactly equal.
"""
if not isinstance(value, SkyCoord):
return NotImplemented
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(getattr(self, attr),
getattr(value, attr)):
raise ValueError(f"cannot compare: extra frame attribute "
f"'{attr}' is not equivalent "
f"(perhaps compare the frames directly to avoid "
f"this exception)")
return self._sky_coord_frame == value._sky_coord_frame
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method,
*args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, 'shape', ()):
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, '_' + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
"""Implement self[item] = value for SkyCoord
The right hand ``value`` must be strictly consistent with self:
- Identical class
- Equivalent frames
- Identical representation_types
- Identical representation differentials keys
- Identical frame attributes
- Identical "extra" frame attributes (e.g. obstime for an ICRS coord)
With these caveats the setitem ends up as effectively a setitem on
the representation data.
self.frame.data[item] = value.frame.data
"""
if self.__class__ is not value.__class__:
raise TypeError(f'can only set from object of same class: '
f'{self.__class__.__name__} vs. '
f'{value.__class__.__name__}')
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(getattr(self, attr),
getattr(value, attr)):
raise ValueError(f'attribute {attr} is not equivalent')
# Set the frame values. This checks frame equivalence and also clears
# the cache to ensure that the object is not in an inconsistent state.
self._sky_coord_frame[item] = value._sky_coord_frame
def insert(self, obj, values, axis=0):
"""
Insert coordinate values before the given indices in the object and
return a new Frame object.
The values to be inserted must conform to the rules for in-place setting
of ``SkyCoord`` objects.
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple insertion before the index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
            Value(s) to insert. If the type of ``values`` is different
            from that of this object, ``values`` is converted to the
            matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.coordinates.SkyCoord` instance
New coordinate object with inserted value(s)
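        Examples
        --------
        An illustrative sketch (formatting of the output may vary):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> sc = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
        >>> sc.insert(1, SkyCoord(5*u.deg, 6*u.deg))  # doctest: +SKIP
        <SkyCoord (ICRS): (ra, dec) in deg
            [(1., 3.), (5., 6.), (2., 4.)]>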
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError('obj arg must be an integer')
if axis != 0:
raise ValueError('axis must be 0')
if not self.shape:
raise TypeError('cannot insert into scalar {} object'
.format(self.__class__.__name__))
if abs(idx0) > len(self):
raise IndexError('index {} is out of bounds for axis 0 with size {}'
.format(idx0, len(self)))
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name)
# Set the output values. This is where validation of `values` takes place to ensure
# that it can indeed be inserted.
out[:idx0] = self[:idx0]
out[idx0:idx0 + n_values] = values
out[idx0 + n_values:] = self[idx0:]
return out
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : frame class, frame object, or str
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
# TODO! like matplotlib, do string overrides for modified methods
new_frame = (_get_frame_class(new_frame) if isinstance(new_frame, str)
else new_frame)
return self.frame.is_transformable_to(new_frame)
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
        default for ``merge_attributes`` is `True`, in which case the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
`SkyCoord` that are not part of the destination frame's definition are
kept (stored on the resulting `SkyCoord`), and thus one can round-trip
(e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance
The frame to transform this coordinate into. If a `SkyCoord`, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : `SkyCoord`
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
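        Examples
        --------
        A simple sketch of a frame change (exact values are skipped under
        doctest):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> icrs = SkyCoord(10*u.deg, 20*u.deg, frame='icrs')
        >>> gal = icrs.transform_to('galactic')  # doctest: +SKIP
        >>> gal.frame.name  # doctest: +SKIP
        'galactic'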
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if (frame_val is not None
and not (merge_attributes
and frame.is_frame_attr_default(attr))):
frame_kwargs[attr] = frame_val
elif (self_val is not None
and not self.is_frame_attr_default(attr)):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError('Transform `frame` must be a frame name, class, or instance')
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError('Cannot transform from {} to {}'
.format(self.frame.__class__, new_frame_cls))
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in (set(new_coord.get_frame_attr_names()) &
set(frame_kwargs.keys())):
frame_kwargs.pop(attr)
# Always remove the origin frame attribute, as that attribute only makes
# sense with a SkyOffsetFrame (in which case it will be stored on the frame).
# See gh-11277.
# TODO: Should it be a property of the frame attribute that it can
# or cannot be stored on a SkyCoord?
frame_kwargs.pop('origin', None)
return self.__class__(new_coord, **frame_kwargs)
def apply_space_motion(self, new_obstime=None, dt=None):
"""
        Compute the position of the source represented by this coordinate object
        at a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation."
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : `SkyCoord`
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
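        Examples
        --------
        A hedged sketch of evolving a coordinate with proper motion by ten
        years (the printed value is a placeholder, not validated output):
        >>> from astropy.coordinates import SkyCoord
        >>> from astropy.time import Time
        >>> import astropy.units as u
        >>> sc = SkyCoord(10*u.deg, 20*u.deg, obstime=Time('J2000'),
        ...               pm_ra_cosdec=100*u.mas/u.yr, pm_dec=-50*u.mas/u.yr)
        >>> sc.apply_space_motion(dt=10*u.yr)  # doctest: +SKIP
        <SkyCoord (ICRS): (ra, dec) in deg ...>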
"""
if (new_obstime is None and dt is None or
new_obstime is not None and dt is not None):
raise ValueError("You must specify one of `new_obstime` or `dt`, "
"but not both.")
# Validate that we have velocity info
if 's' not in self.frame.data.differentials:
raise ValueError('SkyCoord requires velocity data to evolve the '
'position.')
if 'obstime' in self.frame.frame_attributes:
raise NotImplementedError("Updating the coordinates in a frame "
"with explicit time dependence is "
"currently not supported. If you would "
"like this functionality, please open an "
"issue on github:\n"
"https://github.com/astropy/astropy")
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
            # obstime is passed: we need to know the time / epoch at which
            # the position / velocity were measured initially
raise ValueError('This object has no associated `obstime`. '
'apply_space_motion() must receive a time '
'difference, `dt`, and not a new obstime.')
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time('J2000')
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials['s']
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km/u.s)
except u.UnitConversionError: # No RV
rv = 0.
starpm = erfa.pmsafe(icrsrep.lon.radian, icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian/u.yr),
icrsvel.d_lat.to_value(u.radian/u.yr),
plx, rv, t1.jd1, t1.jd2, t2.jd1, t2.jd2)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian/u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian/u.yr, copy=False),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km/u.s, copy=False),
differential_type=SphericalDifferential)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {attrnm: getattr(self, attrnm)
for attrnm in self._extra_frameattr_names}
frattrs['obstime'] = new_obstime
result = self.__class__(icrs2, **frattrs).transform_to(self.frame)
# Without this the output might not have the right differential type.
# Not sure if this fixes the problem or just hides it. See #11932
result.differential_type = self.differential_type
return result
def _is_name(self, string):
"""
Returns whether a string is one of the aliases for the frame.
"""
return (self.frame.name == string or
(isinstance(self.frame.name, list) and string in self.frame.name))
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the primary transform graph.
"""
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.get_frame_attr_names():
return getattr(self.frame, attr)
else:
return getattr(self, '_' + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Fail
raise AttributeError("'{}' object has no attribute '{}'"
.format(self.__class__.__name__, attr))
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__('_' + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith('_') and hasattr(self._sky_coord_frame,
attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__('_' + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__delattr__(attr)
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include:
- Transforms available by aliases
- Attribute / methods of the underlying self.frame object
"""
# determine the aliases that this can be transformed to.
dir_values = set()
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update({attr for attr in dir(self.frame) if not attr.startswith('_')})
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return dir_values
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ': ' + frameattrs
data = self.frame._data_repr()
if data:
data = ': ' + data
return f'<{clsnm} ({coonm}{frameattrs}){data}>'
def to_string(self, style='decimal', **kwargs):
"""
A string representation of the coordinates.
The default styles definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
        keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
            common ways to represent coordinates. The default is ``'decimal'``.
**kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
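        Examples
        --------
        Outputs below are indicative and skipped under doctest:
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> sc = SkyCoord(10.5*u.deg, -20.25*u.deg)
        >>> sc.to_string('decimal')  # doctest: +SKIP
        '10.5 -20.25'
        >>> sc.to_string('hmsdms')  # doctest: +SKIP
        '00h42m00s -20d15m00s'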
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True},
'latargs': {'unit': u.degree, 'pad': True, 'alwayssign': True}},
'dms': {'lonargs': {'unit': u.degree},
'latargs': {'unit': u.degree}},
'decimal': {'lonargs': {'unit': u.degree, 'decimal': True},
'latargs': {'unit': u.degree, 'decimal': True}}
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]['lonargs'])
latargs.update(styles[style]['latargs'])
else:
raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (sph_coord.lon.to_string(**lonargs) +
" " + sph_coord.lat.to_string(**latargs))
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [(lonangle.to_string(**lonargs) +
" " + latangle.to_string(**latargs))]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
def to_table(self):
"""
Convert this |SkyCoord| to a |QTable|.
Any attributes that have the same length as the |SkyCoord| will be
converted to columns of the |QTable|. All other attributes will be
recorded as metadata.
Returns
-------
`~astropy.table.QTable`
A |QTable| containing the data of this |SkyCoord|.
Examples
--------
>>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
... obstime=Time([2000, 2010], format='jyear'))
>>> t = sc.to_table()
>>> t
<QTable length=2>
ra dec obstime
deg deg
float64 float64 Time
------- ------- -------
40.0 0.0 2000.0
70.0 -20.0 2010.0
>>> t.meta
{'representation_type': 'spherical', 'frame': 'icrs'}
"""
self_as_dict = self.info._represent_as_dict()
tabledata = {}
metadata = {}
# Record attributes that have the same length as self as columns in the
# table, and the other attributes as table metadata. This matches
# table.serialize._represent_mixin_as_column().
for key, value in self_as_dict.items():
if getattr(value, 'shape', ())[:1] == (len(self),):
tabledata[key] = value
else:
metadata[key] = value
return QTable(tabledata, meta=metadata)
def is_equivalent_frame(self, other):
"""
        Checks if this object's frame is the same as that of the ``other``
        object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two `SkyCoord` objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.
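        Examples
        --------
        Two default ICRS coordinates share an equivalent frame (output
        indicative):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> c1 = SkyCoord(1*u.deg, 2*u.deg)
        >>> c2 = SkyCoord(3*u.deg, 4*u.deg)
        >>> c1.is_equivalent_frame(c2)  # doctest: +SKIP
        True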
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if not BaseCoordinateFrame._frameattr_equiv(getattr(self, fattrnm),
getattr(other, fattrnm)):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't frame-like")
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
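        Examples
        --------
        One degree of declination at constant right ascension is one degree
        on the sky (output indicative):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> c1 = SkyCoord(0*u.deg, 0*u.deg)
        >>> c2 = SkyCoord(0*u.deg, 1*u.deg)
        >>> c1.separation(c2)  # doctest: +SKIP
        <Angle 1. deg>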
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {}
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
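        Examples
        --------
        Both coordinates must carry distances (output indicative):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> c1 = SkyCoord(0*u.deg, 0*u.deg, distance=10*u.pc)
        >>> c2 = SkyCoord(0*u.deg, 0*u.deg, distance=12*u.pc)
        >>> c1.separation_3d(c2)  # doctest: +SKIP
        <Distance 2. pc>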
"""
if not self.is_equivalent_frame(other):
try:
kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {}
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation :
for the *total* angular offset (not broken out into components).
position_angle :
for the direction of the offset.
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!')
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def spherical_offsets_by(self, d_lon, d_lat):
"""
Computes the coordinate that is a specified pair of angular offsets away
from this coordinate.
Parameters
----------
d_lon : angle-like
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
d_lat : angle-like
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Returns
-------
newcoord : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
``d_lat`` in the latitude direction and ``d_lon`` in the longitude
direction.
Notes
-----
This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the
transformation. For a more complete set of transform offsets, use
`~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually.
This specific method can be reproduced by doing
``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``.
See Also
--------
spherical_offsets_to : compute the angular offsets to another coordinate
directional_offset_by : offset a coordinate by an angle in a direction
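        Examples
        --------
        A hedged sketch (exact output skipped under doctest):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> sc = SkyCoord(10*u.deg, 20*u.deg)
        >>> sc.spherical_offsets_by(1*u.arcmin, -1*u.arcmin)  # doctest: +SKIP
        <SkyCoord (ICRS): (ra, dec) in deg ...>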
"""
return self.__class__(
SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
position_angle : `~astropy.coordinates.Angle`
position_angle of offset
separation : `~astropy.coordinates.Angle`
offset angular separation
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
Returned SkyCoord frame retains only the frame attributes that are for
the resulting frame type. (e.g. if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not.)
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
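        Examples
        --------
        Offsetting one degree due East (position angle 90 deg) from the
        origin; output indicative:
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> c = SkyCoord(0*u.deg, 0*u.deg)
        >>> c.directional_offset_by(90*u.deg, 1*u.deg)  # doctest: +SKIP
        <SkyCoord (ICRS): (ra, dec) in deg
            (1., 0.)>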
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat,
posang=position_angle, distance=separation)
return SkyCoord(newlon, newlat, frame=self.frame)
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
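        Examples
        --------
        A minimal matching sketch (requires SciPy; output indicative):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> targets = SkyCoord([1.1, 2.1]*u.deg, [0, 0]*u.deg)
        >>> catalog = SkyCoord([1, 2, 3]*u.deg, [0, 0, 0]*u.deg)
        >>> idx, sep2d, dist3d = targets.match_to_catalog_sky(catalog)  # doctest: +SKIP
        >>> idx  # doctest: +SKIP
        array([0, 1])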
"""
from .matching import match_coordinates_sky
if not (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_sky(self, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_sky')
return res
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
"""
from .matching import match_coordinates_3d
if not (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_3d(self, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_3d')
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : coordinate-like
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` ['angle']
The on-sky separation to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
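        Examples
        --------
        A hedged sketch (requires SciPy; shapes and ordering indicative):
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> coords = SkyCoord([1, 2, 3]*u.deg, [0, 0, 0]*u.deg)
        >>> around = SkyCoord([1.05]*u.deg, [0]*u.deg)
        >>> idxaround, idxself, sep2d, dist3d = coords.search_around_sky(
        ...     around, 0.1*u.deg)  # doctest: +SKIP
        >>> idxself  # doctest: +SKIP
        array([0])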
"""
from .matching import search_around_sky
return search_around_sky(searcharoundcoords, self, seplimit,
storekdtree='_kdtree_sky')
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` ['length']
The physical radius to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
"""
from .matching import search_around_3d
return search_around_3d(searcharoundcoords, self, distlimit,
storekdtree='_kdtree_3d')
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
`SkyCoord` and another.
Parameters
----------
other : `SkyCoord`
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get position_angle to another '
'SkyCoord or a coordinate frame with data')
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this `SkyCoord` at the origin.
        Parameters
        ----------
        rotation : angle-like
            The final rotation of the frame about the ``origin``. The sign of
            the rotation is the left-hand rule. That is, an object at a
            particular position angle in the un-rotated system will be sent to
            the positive latitude (z) direction in the final frame.
        Returns
        -------
        astrframe : `~astropy.coordinates.SkyOffsetFrame`
            A sky offset frame of the same type as this `SkyCoord` (e.g., if
            this object has an ICRS coordinate, the resulting frame is
            SkyOffsetICRS, with the origin set to this object)
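        Examples
        --------
        A hedged sketch of measuring offsets relative to a center coordinate:
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> center = SkyCoord(10*u.deg, 20*u.deg)
        >>> aframe = center.skyoffset_frame()
        >>> SkyCoord(10*u.deg, 21*u.deg).transform_to(aframe)  # doctest: +SKIP
        <SkyCoord (SkyOffsetICRS: ...): (lon, lat) in deg
            (0., 1.)>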
"""
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list='iau'):
"""
Determines the constellation(s) of the coordinates this `SkyCoord`
contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array `SkyCoord`, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. Work around is to drop the velocities.
# they are not needed here since only position information is used
extra_frameattrs = {nm: getattr(self, nm)
for nm in self._extra_frameattr_names}
novel = SkyCoord(self.realize_frame(self.data.without_differentials()),
**extra_frameattrs)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
# return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode='all'):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
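        Examples
        --------
        A hedged sketch; ``sc`` and ``w`` stand in for a hypothetical
        `SkyCoord` and a two-axis celestial `~astropy.wcs.WCS`:
        >>> xp, yp = sc.to_pixel(w)  # doctest: +SKIP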
"""
from astropy.wcs.utils import skycoord_to_pixel
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'):
"""
Create a new `SkyCoord` from pixel coordinates using an
`~astropy.wcs.WCS` object.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or including only the core WCS transformation (``'wcs'``).
Returns
-------
coord : `~astropy.coordinates.SkyCoord`
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
from astropy.wcs.utils import pixel_to_skycoord
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS whose footprint the coordinate is checked against.
        image : array, optional
            The image associated with the wcs object that the coordinate
            is being checked against. If not given, the naxis keywords will
            be used to determine if the coordinate falls within the wcs
            footprint.
**kwargs
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
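        Examples
        --------
        A hedged sketch; ``w`` stands in for a hypothetical celestial
        `~astropy.wcs.WCS` covering the coordinate:
        >>> from astropy.coordinates import SkyCoord
        >>> import astropy.units as u
        >>> sc = SkyCoord(10*u.deg, 20*u.deg)
        >>> sc.contained_by(w)  # doctest: +SKIP
        True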
"""
if image is not None:
ymax, xmax = image.shape
else:
xmax, ymax = wcs._naxis
import warnings
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
def radial_velocity_correction(self, kind='barycentric', obstime=None,
location=None):
"""
Compute the correction required to convert a radial velocity at a given
time and place on the Earth's Surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the `SkyCoord` will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the `SkyCoord` will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this `SkyCoord`.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` ['speed']
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
        heliocentric correction and includes additional physics (e.g., time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. The barycentric correction returned uses the optical
approximation v = z * c. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord, EarthLocation
>>> from astropy.constants import c
>>> t = Time(56370.5, format='mjd', scale='utc')
>>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc) # doctest: +REMOTE_DATA
>>> rv = rv + vcorr + rv * vcorr / c # doctest: +SKIP
Also note that this method returns the correction velocity in the so-called
*optical convention*::
>>> vcorr = zb * c # doctest: +SKIP
where ``zb`` is the barycentric correction redshift as defined in section 3
of Wright & Eastman (2014). The application formula given above follows from their
equation (11) under assumption that the radial velocity ``rv`` has also been defined
using the same optical convention. Note, this can be regarded as a matter of
velocity definition and does not by itself imply any loss of accuracy, provided
sufficient care has been taken during interpretation of the results. If you need
the barycentric correction expressed as the full relativistic velocity (e.g., to provide
it as the input to another software which performs the application), the
following recipe can be used::
>>> zb = vcorr / c # doctest: +REMOTE_DATA
>>> zb_plus_one_squared = (zb + 1) ** 2 # doctest: +REMOTE_DATA
>>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1) # doctest: +REMOTE_DATA
or alternatively using just equivalencies::
>>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz)) # doctest: +REMOTE_DATA
See also `~astropy.units.equivalencies.doppler_optical`,
`~astropy.units.equivalencies.doppler_radio`, and
`~astropy.units.equivalencies.doppler_relativistic` for more information on
the velocity conventions.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
either directly or via ``with`` statement. For example, to use the JPL
ephemeris, do::
>>> from astropy.coordinates import solar_system_ephemeris
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> with solar_system_ephemeris.set('jpl'): # doctest: +REMOTE_DATA
... rv += sc.radial_velocity_correction(obstime=t, location=loc) # doctest: +SKIP
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel
# location validation
timeloc = getattr(obstime, 'location', None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError('`location` cannot be in both the '
'passed-in `obstime` and this `SkyCoord` '
'because it is ambiguous which is meant '
'for the radial_velocity_correction.')
elif timeloc is not None:
location = timeloc
else:
raise TypeError('Must provide a `location` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute, as an attribute on '
'the passed in `obstime`, or in the method '
'call.')
elif self.location is not None or timeloc is not None:
raise ValueError('Cannot compute radial velocity correction if '
'`location` argument is passed in and there is '
'also a `location` attribute on this SkyCoord or '
'the passed-in `obstime`.')
# obstime validation
coo_at_rv_obstime = self # assume we need no space motion for now
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError('Must provide an `obstime` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute or in the method '
'call.')
elif self.obstime is not None and self.frame.data.differentials:
# we do need space motion after all
coo_at_rv_obstime = self.apply_space_motion(obstime)
elif self.obstime is None:
# warn the user if the object has differentials set
if 's' in self.data.differentials:
warnings.warn(
"SkyCoord has space motion, and therefore the specified "
"position of the SkyCoord may not be the same as "
"the `obstime` for the radial velocity measurement. "
"This may affect the rv correction at the order of km/s"
"for very high proper motions sources. If you wish to "
"apply space motion of the SkyCoord to correct for this"
"the `obstime` attribute of the SkyCoord must be set",
AstropyUserWarning
)
pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)
if kind == 'barycentric':
v_origin_to_earth = v_earth
elif kind == 'heliocentric':
v_sun = get_body_barycentric_posvel('sun', obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError("`kind` argument to radial_velocity_correction must "
"be 'barycentric' or 'heliocentric', but got "
"'{}'".format(kind))
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
        # Transforming to GCRS is not the correct thing to do here, since we
        # don't want to include aberration (or light deflection). Instead,
        # only apply parallax if necessary.
icrs_cart = coo_at_rv_obstime.icrs.cartesian
icrs_cart_novel = icrs_cart.without_differentials()
if self.data.__class__ is UnitSphericalRepresentation:
targcart = icrs_cart_novel
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
targcart = icrs_cart_novel - obs_icrs_cart
targcart /= targcart.norm()
if kind == 'barycentric':
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)
gr = location.gravitational_redshift(obstime)
            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
            # neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr/speed_of_light)
# try and get terms corresponding to stellar motion.
if icrs_cart.differentials:
try:
ro = self.icrs.cartesian
beta_star = ro.differentials['s'].to_cartesian() / speed_of_light
# ICRS unit vector at coordinate epoch
ro = ro.without_differentials()
ro /= ro.norm()
zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
except u.UnitConversionError:
warnings.warn("SkyCoord contains some velocity information, but not enough to "
"calculate the full space motion of the source, and so this has "
"been ignored for the purposes of calculating the radial velocity "
"correction. This can lead to errors on the order of metres/second.",
AstropyUserWarning)
zb = zb - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new `SkyCoord` from the data
in an astropy Table.
        This method matches table columns that start with the case-insensitive
        names of the components of the requested frames (including
differentials), if they are also followed by a non-alphanumeric
character. It will also match columns that *end* with the component name
if a non-alphanumeric character is *before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
        ``'gal_l'`` will be used as the ``l`` component, but ``gall`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
        are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : `~astropy.table.Table` or subclass
The table to load data from.
**coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord` or subclass
The new `SkyCoord` (or subclass) object.
Raises
------
ValueError
If more than one match is found in the table for a component,
unless the additional matches are also valid frame component names.
If a "coord_kwargs" is provided for a value also found in the table.
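        Examples
        --------
        A hedged sketch; the column names ``my_ra`` and ``my_dec`` are
        hypothetical:
        >>> from astropy.table import Table
        >>> import astropy.units as u
        >>> t = Table({'my_ra': [1, 2]*u.deg, 'my_dec': [3, 4]*u.deg})
        >>> sc = SkyCoord.guess_from_table(t)  # doctest: +SKIP
        >>> sc.ra  # doctest: +SKIP
        <Longitude [1., 2.] deg>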
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs['frame'] = coord_kwargs.get('frame', frame)
representation_component_names = (
set(frame.get_representation_component_names())
.union(set(frame.get_representation_component_names("s")))
)
comp_kwargs = {}
for comp_name in representation_component_names:
# this matches things like 'ra[...]'' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r'(\W|\b|_)'
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r'.*(\W|\b|_)' + comp_name + r'\b'
# the final regex ORs together the two patterns
rex = re.compile(rf"({starts_with_comp})|({ends_with_comp})",
re.IGNORECASE | re.UNICODE)
# find all matches
matches = {col_name for col_name in table.colnames
if rex.match(col_name)}
# now need to select among matches, also making sure we don't have
# an exact match with another component
if len(matches) == 0: # no matches
continue
elif len(matches) == 1: # only one match
col_name = matches.pop()
else: # more than 1 match
# try to sieve out other components
matches -= representation_component_names - {comp_name}
# if there's only one remaining match, it worked.
if len(matches) == 1:
col_name = matches.pop()
else:
raise ValueError(
'Found at least two matches for component '
f'"{comp_name}": "{matches}". Cannot guess coordinates '
'from a table with this ambiguity.')
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError('Found column "{}" in table, but it was '
'already provided as "{}" keyword to '
'guess_from_table function.'.format(v.name, k))
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame='icrs', parse=False, cache=True):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
parse : bool
Whether to attempt extracting the coordinates from the name by
            parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.,
'CRTS SSS100805 J194428-420209', this may be much faster than a
Sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, optional
Determines whether to cache the results or not. To update or
overwrite an existing value, pass ``cache='update'``.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
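        Examples
        --------
        Name resolution needs network access, so this example only runs with
        remote data enabled:
        >>> from astropy.coordinates import SkyCoord
        >>> m42 = SkyCoord.from_name('M42')  # doctest: +REMOTE_DATA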
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
icrs_sky_coord = cls(icrs_coord)
if frame in ('icrs', icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
|
bb5c9a27cf338a975891b24eed83264676bc783b706220e99b3fe23b17cb52fe | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to this file.
# angle_lextab.py. This file automatically created by PLY (version 3.11). Don't edit!
_tabversion = '3.10'
_lextokens = {'COLON', 'DEGREE', 'EASTWEST', 'HOUR', 'MINUTE', 'NORTHSOUTH', 'SECOND', 'SIGN', 'SIMPLE_UNIT', 'UFLOAT', 'UINT'}
_lexreflags = 64
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [('(?P<t_UFLOAT>((\\d+\\.\\d*)|(\\.\\d+))([eE][+-−]?\\d+)?)|(?P<t_UINT>\\d+)|(?P<t_SIGN>[+−-])|(?P<t_EASTWEST>[EW]$)|(?P<t_NORTHSOUTH>[NS]$)|(?P<t_SIMPLE_UNIT>(?:Earcmin)|(?:Earcsec)|(?:Edeg)|(?:Erad)|(?:Garcmin)|(?:Garcsec)|(?:Gdeg)|(?:Grad)|(?:Marcmin)|(?:Marcsec)|(?:Mdeg)|(?:Mrad)|(?:Parcmin)|(?:Parcsec)|(?:Pdeg)|(?:Prad)|(?:Tarcmin)|(?:Tarcsec)|(?:Tdeg)|(?:Trad)|(?:Yarcmin)|(?:Yarcsec)|(?:Ydeg)|(?:Yrad)|(?:Zarcmin)|(?:Zarcsec)|(?:Zdeg)|(?:Zrad)|(?:aarcmin)|(?:aarcsec)|(?:adeg)|(?:arad)|(?:arcmin)|(?:arcminute)|(?:arcsec)|(?:arcsecond)|(?:attoarcminute)|(?:attoarcsecond)|(?:attodegree)|(?:attoradian)|(?:carcmin)|(?:carcsec)|(?:cdeg)|(?:centiarcminute)|(?:centiarcsecond)|(?:centidegree)|(?:centiradian)|(?:crad)|(?:cy)|(?:cycle)|(?:daarcmin)|(?:daarcsec)|(?:dadeg)|(?:darad)|(?:darcmin)|(?:darcsec)|(?:ddeg)|(?:decaarcminute)|(?:decaarcsecond)|(?:decadegree)|(?:decaradian)|(?:deciarcminute)|(?:deciarcsecond)|(?:decidegree)|(?:deciradian)|(?:dekaarcminute)|(?:dekaarcsecond)|(?:dekadegree)|(?:dekaradian)|(?:drad)|(?:exaarcminute)|(?:exaarcsecond)|(?:exadegree)|(?:exaradian)|(?:farcmin)|(?:farcsec)|(?:fdeg)|(?:femtoarcminute)|(?:femtoarcsecond)|(?:femtodegree)|(?:femtoradian)|(?:frad)|(?:gigaarcminute)|(?:gigaarcsecond)|(?:gigadegree)|(?:gigaradian)|(?:harcmin)|(?:harcsec)|(?:hdeg)|(?:hectoarcminute)|(?:hectoarcsecond)|(?:hectodegree)|(?:hectoradian)|(?:hrad)|(?:karcmin)|(?:karcsec)|(?:kdeg)|(?:kiloarcminute)|(?:kiloarcsecond)|(?:kilodegree)|(?:kiloradian)|(?:krad)|(?:marcmin)|(?:marcsec)|(?:mas)|(?:mdeg)|(?:megaarcminute)|(?:megaarcsecond)|(?:megadegree)|(?:megaradian)|(?:microarcminute)|(?:microarcsecond)|(?:microdegree)|(?:microradian)|(?:milliarcminute)|(?:milliarcsecond)|(?:millidegree)|(?:milliradian)|(?:mrad)|(?:nanoarcminute)|(?:nanoarcsecond)|(?:nanodegree)|(?:nanoradian)|(?:narcmin)|(?:narcsec)|(?:ndeg)|(?:nrad)|(?:parcmin)|(?:parcsec)|(?:pdeg)|(?:petaarcminute)|(?:petaarcsecond)|(?:petadegree)|(?:petaradian)|(?:picoarcminute)|(?:picoarcsecond)|(?:picodegree)|(?:picoradian)|(?:prad)|(?:rad)|(?:radian)|(?:teraarcminute)|(?:teraarcsecond)|(?:teradegree)|(?:teraradian)|(?:uarcmin)|(?:uarcsec)|(?:uas)|(?:udeg)|(?:urad)|(?:yarcmin)|(?:yarcsec)|(?:ydeg)|(?:yoctoarcminute)|(?:yoctoarcsecond)|(?:yoctodegree)|(?:yoctoradian)|(?:yottaarcminute)|(?:yottaarcsecond)|(?:yottadegree)|(?:yottaradian)|(?:yrad)|(?:zarcmin)|(?:zarcsec)|(?:zdeg)|(?:zeptoarcminute)|(?:zeptoarcsecond)|(?:zeptodegree)|(?:zeptoradian)|(?:zettaarcminute)|(?:zettaarcsecond)|(?:zettadegree)|(?:zettaradian)|(?:zrad))|(?P<t_MINUTE>m(in(ute(s)?)?)?|′|\\\'|ᵐ)|(?P<t_SECOND>s(ec(ond(s)?)?)?|″|\\"|ˢ)|(?P<t_DEGREE>d(eg(ree(s)?)?)?|°)|(?P<t_HOUR>hour(s)?|h(r)?|ʰ)|(?P<t_COLON>:)', [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_EASTWEST', 'EASTWEST'), ('t_NORTHSOUTH', 'NORTHSOUTH'), ('t_SIMPLE_UNIT', 'SIMPLE_UNIT'), (None, 'MINUTE'), None, None, None, (None, 'SECOND'), None, None, None, (None, 'DEGREE'), None, None, None, (None, 'HOUR'), None, None, (None, 'COLON')])]}
_lexstateignore = {'INITIAL': ' '}
_lexstateerrorf = {'INITIAL': 't_error'}
_lexstateeoff = {}
"""
In this module, we define the coordinate representation classes, which are
used to represent low-level cartesian, spherical, cylindrical, and other
coordinates.
"""
import abc
import functools
import operator
import inspect
import warnings
import numpy as np
import astropy.units as u
from erfa import ufunc as erfa_ufunc
from .angles import Angle, Longitude, Latitude
from .distances import Distance
from .matrix_utilities import is_O3
from astropy.utils import ShapedLikeNDArray, classproperty
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import DuplicateRepresentationWarning
__all__ = ["BaseRepresentationOrDifferential", "BaseRepresentation",
"CartesianRepresentation", "SphericalRepresentation",
"UnitSphericalRepresentation", "RadialRepresentation",
"PhysicsSphericalRepresentation", "CylindricalRepresentation",
"BaseDifferential", "CartesianDifferential",
"BaseSphericalDifferential", "BaseSphericalCosLatDifferential",
"SphericalDifferential", "SphericalCosLatDifferential",
"UnitSphericalDifferential", "UnitSphericalCosLatDifferential",
"RadialDifferential", "CylindricalDifferential",
"PhysicsSphericalDifferential"]
# Module-level dict mapping representation string alias names to classes.
# This is populated by __init_subclass__ when called by Representation or
# Differential classes so that they are all registered automatically.
REPRESENTATION_CLASSES = {}
DIFFERENTIAL_CLASSES = {}
# set for tracking duplicates
DUPLICATE_REPRESENTATIONS = set()
# a hash for the content of the above two dicts, cached for speed.
_REPRDIFF_HASH = None
def _fqn_class(cls):
''' Get the fully qualified name of a class '''
return cls.__module__ + '.' + cls.__qualname__
def get_reprdiff_cls_hash():
"""
Returns a hash value that should be invariable if the
`REPRESENTATION_CLASSES` and `DIFFERENTIAL_CLASSES` dictionaries have not
changed.
"""
global _REPRDIFF_HASH
if _REPRDIFF_HASH is None:
_REPRDIFF_HASH = (hash(tuple(REPRESENTATION_CLASSES.items())) +
hash(tuple(DIFFERENTIAL_CLASSES.items())))
return _REPRDIFF_HASH
def _invalidate_reprdiff_cls_hash():
global _REPRDIFF_HASH
_REPRDIFF_HASH = None
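# Illustrative sketch (not part of the module): the cached hash lets callers
# cheaply detect changes to the registries, e.g.
#
#     >>> h0 = get_reprdiff_cls_hash()
#     >>> h0 == get_reprdiff_cls_hash()   # second call returns the cache
#     True
#
# Defining a new Representation or Differential subclass calls
# _invalidate_reprdiff_cls_hash(), so the next call recomputes the hash.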
def _array2string(values, prefix=''):
# Work around version differences for array2string.
kwargs = {'separator': ', ', 'prefix': prefix}
kwargs['formatter'] = {}
return np.array2string(values, **kwargs)
class BaseRepresentationOrDifferentialInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {'unit'} # Indicates unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
# Create numpy dtype so that numpy formatting will work.
components = val.components
values = tuple(getattr(val, component).value for component in components)
a = np.empty(getattr(val, 'shape', ()),
[(component, value.dtype) for component, value
in zip(components, values)])
for component, value in zip(components, values):
a[component] = value
return str(a)
@property
def _represent_as_dict_attrs(self):
return self._parent.components
@property
def unit(self):
if self._parent is None:
return None
unit = self._parent._unitstr
return unit[1:-1] if unit.startswith('(') else unit
def new_like(self, reps, length, metadata_conflicts='warn', name=None):
"""
Return a new instance like ``reps`` with ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
reps : list
List of input representations or differentials.
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `BaseRepresentation` or `BaseDifferential` subclass instance
Empty instance of this class consistent with ``reps``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(reps, metadata_conflicts, name,
('meta', 'description'))
# Make a new representation or differential with the desired length
# using the _apply / __getitem__ machinery to effectively return
# rep0[[0, 0, ..., 0, 0]]. This will have the right shape, and
# include possible differentials.
indexes = np.zeros(length, dtype=np.int64)
out = reps[0][indexes]
# Use __setitem__ machinery to check whether all representations
# can represent themselves as this one without loss of information.
for rep in reps[1:]:
try:
out[0] = rep[0]
except Exception as err:
raise ValueError('Input representations are inconsistent.') from err
# Set (merged) info attributes.
for attr in ('name', 'meta', 'description'):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
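# Illustrative sketch of how ``new_like`` supports table operations (assumed
# usage, mirroring what vstack does with column info): given two
# CartesianRepresentation columns ``rep1`` and ``rep2`` of lengths 2 and 3,
#
#     >>> out = rep1.info.new_like([rep1, rep2], 5)
#
# returns an empty 5-element instance whose slices can then be filled
# in-place, e.g. ``out[:2] = rep1`` and ``out[2:] = rep2``.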
class BaseRepresentationOrDifferential(ShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied; if `False`, they will be
broadcast together but not use new memory.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__ = 50000
info = BaseRepresentationOrDifferentialInfo()
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
if (args and isinstance(args[0], self.__class__)
and all(arg is None for arg in args[1:])):
rep_or_diff = args[0]
copy = kwargs.pop('copy', True)
attrs = [getattr(rep_or_diff, component)
for component in components]
if 'info' in rep_or_diff.__dict__:
self.info = rep_or_diff.info
if kwargs:
raise TypeError(f'unexpected keyword arguments for case '
f'where class instance is passed in: {kwargs}')
else:
attrs = []
for component in components:
try:
attr = args.pop(0) if args else kwargs.pop(component)
except KeyError:
raise TypeError(f'__init__() missing 1 required positional '
f'argument: {component!r}') from None
if attr is None:
raise TypeError(f'__init__() missing 1 required positional '
f'argument: {component!r} (or first '
f'argument should be an instance of '
f'{self.__class__.__name__}).')
attrs.append(attr)
copy = args.pop(0) if args else kwargs.pop('copy', True)
if args:
raise TypeError(f'unexpected arguments: {args}')
if kwargs:
for component in components:
if component in kwargs:
raise TypeError(f"__init__() got multiple values for "
f"argument {component!r}")
raise TypeError(f'unexpected keyword arguments: {kwargs}')
# Pass attributes through the required initializing classes.
attrs = [self.attr_classes[component](attr, copy=copy, subok=True)
for component, attr in zip(components, attrs)]
try:
bc_attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError as err:
if len(components) <= 2:
c_str = ' and '.join(components)
else:
c_str = ', '.join(components[:2]) + ', and ' + components[2]
raise ValueError(f"Input parameters {c_str} cannot be broadcast") from err
# The output of np.broadcast_arrays() has limitations on writeability, so we perform
# additional handling to enable writeability in most situations. This is primarily
# relevant for allowing the changing of the wrap angle of longitude components.
#
# If the shape has changed for a given component, broadcasting is needed:
# If copy=True, we make a copy of the broadcasted array to ensure writeability.
# Note that array had already been copied prior to the broadcasting.
# TODO: Find a way to avoid the double copy.
# If copy=False, we use the broadcasted array, and writeability may still be
# limited.
# If the shape has not changed for a given component, we can proceed with using the
# non-broadcasted array, which avoids writeability issues from np.broadcast_arrays().
attrs = [(bc_attr.copy() if copy else bc_attr) if bc_attr.shape != attr.shape else attr
for attr, bc_attr in zip(attrs, bc_attrs)]
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, '_' + component, attr)
@classmethod
def get_name(cls):
"""Name of the representation or differential.
In lower case, with any trailing 'representation' or 'differential'
removed. (E.g., 'spherical' for
`~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
"""
name = cls.__name__.lower()
if name.endswith('representation'):
name = name[:-14]
elif name.endswith('differential'):
name = name[:-12]
return name
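# Illustrative examples of the naming rule above:
#     CartesianRepresentation.get_name()        -> 'cartesian'
#     SphericalDifferential.get_name()          -> 'spherical'
#     PhysicsSphericalRepresentation.get_name() -> 'physicsspherical'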
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : `BaseRepresentation` subclass instance
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. For example, transforming
an angular position defined at distance=0 through cartesian coordinates
and back will lose the original angular coordinates::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> rep = coord.SphericalRepresentation(
... lon=15*u.deg,
... lat=-11*u.deg,
... distance=0*u.pc)
>>> rep.to_cartesian().represent_as(coord.SphericalRepresentation)
<SphericalRepresentation (lon, lat, distance) in (rad, rad, pc)
(0., 0., 0.)>
Returns
-------
cartrepr : `CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def __eq__(self, value):
"""Equality operator
This implements strict equality and requires that the representation
classes are identical and that the representation data are exactly equal.
"""
if self.__class__ is not value.__class__:
raise TypeError(f'cannot compare: objects must have same class: '
f'{self.__class__.__name__} vs. '
f'{value.__class__.__name__}')
try:
np.broadcast(self, value)
except ValueError as exc:
raise ValueError(f'cannot compare: {exc}') from exc
out = True
for comp in self.components:
out &= (getattr(self, '_' + comp) == getattr(value, '_' + comp))
return out
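# Illustrative sketch (assumed values): equality is element-wise over the
# broadcast shape, e.g.
#
#     >>> rep1 = CartesianRepresentation([1, 2]*u.pc, [0, 0]*u.pc, [0, 0]*u.pc)
#     >>> rep2 = CartesianRepresentation([1, 3]*u.pc, [0, 0]*u.pc, [0, 0]*u.pc)
#     >>> rep1 == rep2
#     array([ True, False])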
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, '_' + component,
apply_method(getattr(self, component)))
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
new.info = self.info
return new
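# Illustrative sketch (assumed values): ``_apply`` is what powers the shape
# methods inherited from ShapedLikeNDArray, e.g.
#
#     >>> rep = CartesianRepresentation([1, 2]*u.pc, [3, 4]*u.pc, [5, 6]*u.pc)
#     >>> rep.reshape(2, 1).shape   # internally rep._apply('reshape', (2, 1))
#     (2, 1)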
def __setitem__(self, item, value):
if value.__class__ is not self.__class__:
raise TypeError(f'can only set from object of same class: '
f'{self.__class__.__name__} vs. '
f'{value.__class__.__name__}')
for component in self.components:
getattr(self, '_' + component)[item] = getattr(value, '_' + component)
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items])
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return {cmpnt: getattr(self, cmpnt).unit for cmpnt in self.components}
@property
def _unitstr(self):
units_set = set(self._units.values())
if len(units_set) == 1:
unitstr = units_set.pop().to_string()
else:
unitstr = '({})'.format(
', '.join([self._units[component].to_string()
for component in self.components]))
return unitstr
def __str__(self):
return f'{_array2string(self._values)} {self._unitstr:s}'
def __repr__(self):
prefixstr = ' '
arrstr = _array2string(self._values, prefix=prefixstr)
diffstr = ''
if getattr(self, 'differentials', None):
diffstr = '\n (has differentials w.r.t.: {})'.format(
', '.join([repr(key) for key in self.differentials.keys()]))
unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]'
return '<{} ({}) {:s}\n{}{}{}>'.format(
self.__class__.__name__, ', '.join(self.components),
unitstr, prefixstr, arrstr, diffstr)
def _make_getter(component):
"""Make an attribute getter for use in a property.
Parameters
----------
component : str
The name of the component that should be accessed. This assumes the
actual value is stored in an attribute of that name prefixed by '_'.
"""
# This has to be done in a function to ensure the reference to component
# is not lost/redirected.
component = '_' + component
def get_component(self):
return getattr(self, component)
return get_component
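# Illustrative sketch: _make_getter is used by __init_subclass__ to build the
# component properties, e.g. for a hypothetical component named 'x':
#
#     >>> getter = _make_getter('x')       # reads self._x
#     >>> prop = property(getter, doc="The 'x' component of the point(s).")
#
# The function-level closure is needed because a lambda capturing the loop
# variable directly would end up pointing at the last component in the loop.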
class RepresentationInfo(BaseRepresentationOrDifferentialInfo):
@property
def _represent_as_dict_attrs(self):
attrs = super()._represent_as_dict_attrs
if self._parent._differentials:
attrs += ('differentials',)
return attrs
def _represent_as_dict(self, attrs=None):
out = super()._represent_as_dict(attrs)
for key, value in out.pop('differentials', {}).items():
out[f'differentials.{key}'] = value
return out
def _construct_from_dict(self, map):
differentials = {}
for key in list(map.keys()):
if key.startswith('differentials.'):
differentials[key[14:]] = map.pop(key)
map['differentials'] = differentials
return super()._construct_from_dict(map)
class BaseRepresentation(BaseRepresentationOrDifferential):
"""Base for representing a point in a 3D coordinate system.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D points. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
subclass instance, or a dictionary with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All representation classes should subclass this base representation class,
and define an ``attr_classes`` attribute, a `dict`
which maps component names to the class that creates them. They must also
define a ``to_cartesian`` method and a ``from_cartesian`` class method. By
default, transformations are done via the cartesian system, but classes
that want to define a smarter transformation path can overload the
``represent_as`` method. If one wants to use an associated differential
class, one should also define ``unit_vectors`` and ``scale_factors``
methods (see those methods for details).
"""
info = RepresentationInfo()
def __init_subclass__(cls, **kwargs):
# Register representation name (except for BaseRepresentation)
if cls.__name__ == 'BaseRepresentation':
return
if not hasattr(cls, 'attr_classes'):
raise NotImplementedError('Representations must have an '
'"attr_classes" class attribute.')
repr_name = cls.get_name()
# first time a duplicate is added
# remove first entry and add both using their qualnames
if repr_name in REPRESENTATION_CLASSES:
DUPLICATE_REPRESENTATIONS.add(repr_name)
fqn_cls = _fqn_class(cls)
existing = REPRESENTATION_CLASSES[repr_name]
fqn_existing = _fqn_class(existing)
if fqn_cls == fqn_existing:
raise ValueError(f'Representation "{fqn_cls}" already defined')
msg = (
f'Representation "{repr_name}" already defined, removing it to avoid confusion.'
f'Use qualnames "{fqn_cls}" and "{fqn_existing}" or class instances directly'
)
warnings.warn(msg, DuplicateRepresentationWarning)
del REPRESENTATION_CLASSES[repr_name]
REPRESENTATION_CLASSES[fqn_existing] = existing
repr_name = fqn_cls
# further definitions with the same name, just add qualname
elif repr_name in DUPLICATE_REPRESENTATIONS:
fqn_cls = _fqn_class(cls)
warnings.warn(f'Representation "{repr_name}" already defined, using qualname '
              f'"{fqn_cls}".', DuplicateRepresentationWarning)
repr_name = fqn_cls
if repr_name in REPRESENTATION_CLASSES:
raise ValueError(
f'Representation "{repr_name}" already defined'
)
REPRESENTATION_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# define getters for any component that does not yet have one.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(cls, component,
property(_make_getter(component),
doc=f"The '{component}' component of the points(s)."))
super().__init_subclass__(**kwargs)
def __init__(self, *args, differentials=None, **kwargs):
# Handle any differentials passed in.
super().__init__(*args, **kwargs)
if (differentials is None
and args and isinstance(args[0], self.__class__)):
differentials = args[0]._differentials
self._differentials = self._validate_differentials(differentials)
def _validate_differentials(self, differentials):
"""
Validate that the provided differentials are appropriate for this
representation and recast/reshape as necessary and then return.
Note that this does *not* set the differentials on
``self._differentials``, but rather leaves that for the caller.
"""
# Now handle the actual validation of any specified differential classes
if differentials is None:
differentials = dict()
elif isinstance(differentials, BaseDifferential):
# We can't handle auto-determining the key for this combo
if (isinstance(differentials, RadialDifferential) and
isinstance(self, UnitSphericalRepresentation)):
raise ValueError("To attach a RadialDifferential to a "
"UnitSphericalRepresentation, you must supply "
"a dictionary with an appropriate key.")
key = differentials._get_deriv_key(self)
differentials = {key: differentials}
for key in differentials:
try:
diff = differentials[key]
except TypeError as err:
raise TypeError("'differentials' argument must be a "
"dictionary-like object") from err
diff._check_base(self)
if (isinstance(diff, RadialDifferential) and
isinstance(self, UnitSphericalRepresentation)):
# We trust the passing of a key for a RadialDifferential
# attached to a UnitSphericalRepresentation because it will not
# have a paired component name (UnitSphericalRepresentation has
# no .distance) to automatically determine the expected key
pass
else:
expected_key = diff._get_deriv_key(self)
if key != expected_key:
raise ValueError("For differential object '{}', expected "
"unit key = '{}' but received key = '{}'"
.format(repr(diff), expected_key, key))
# For now, we are very rigid: differentials must have the same shape
# as the representation. This makes it easier to handle __getitem__
# and any other shape-changing operations on representations that
# have associated differentials
if diff.shape != self.shape:
# TODO: message of IncompatibleShapeError is not customizable,
# so use a ValueError instead?
raise ValueError("Shape of differentials must be the same "
"as the shape of the representation ({} vs "
"{})".format(diff.shape, self.shape))
return differentials
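# Illustrative sketch (assumed values) of the expected differential keys: for
# a velocity differential attached to a positional representation, the key is
# the SI unit of the derivative, i.e. 's':
#
#     >>> dif = CartesianDifferential(4, 5, 6, unit=u.km/u.s)
#     >>> rep = CartesianRepresentation(1, 2, 3, unit=u.kpc, differentials=dif)
#     >>> list(rep.differentials)
#     ['s']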
def _raise_if_has_differentials(self, op_name):
"""
Used to raise a consistent exception for any operation that is not
supported when a representation has differentials attached.
"""
if self.differentials:
raise TypeError("Operation '{}' is not supported when "
"differentials are attached to a {}."
.format(op_name, self.__class__.__name__))
@classproperty
def _compatible_differentials(cls):
return [DIFFERENTIAL_CLASSES[cls.get_name()]]
@property
def differentials(self):
"""A dictionary of differential class instances.
The keys of this dictionary must be a string representation of the SI
unit with which the differential (derivative) is taken. For example, for
a velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
"""
return self._differentials
# We do not make unit_vectors and scale_factors abstract methods, since
# they are only necessary if one also defines an associated Differential.
# Also, doing so would break pre-differential representation subclasses.
def unit_vectors(self):
r"""Cartesian unit vectors in the direction of each component.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented unit vectors")
def scale_factors(self):
r"""Scale factors for each component's direction.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
scale_factors : dict of `~astropy.units.Quantity`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented scale factors.")
def _re_represent_differentials(self, new_rep, differential_class):
"""Re-represent the differentials to the specified classes.
This returns a new dictionary with the same keys but with the
attached differentials converted to the new differential classes.
"""
if differential_class is None:
return dict()
if not self.differentials and differential_class:
raise ValueError("No differentials associated with this "
"representation!")
elif (len(self.differentials) == 1 and
inspect.isclass(differential_class) and
issubclass(differential_class, BaseDifferential)):
# TODO: is there a better way to do this?
differential_class = {
list(self.differentials.keys())[0]: differential_class
}
elif differential_class.keys() != self.differentials.keys():
raise ValueError("Desired differential classes must be passed in "
"as a dictionary with keys equal to a string "
"representation of the unit of the derivative "
"for each differential stored with this "
f"representation object ({self.differentials})")
new_diffs = dict()
for k in self.differentials:
diff = self.differentials[k]
try:
new_diffs[k] = diff.represent_as(differential_class[k],
base=self)
except Exception as err:
if (differential_class[k] not in
new_rep._compatible_differentials):
raise TypeError("Desired differential class {} is not "
"compatible with the desired "
"representation class {}"
.format(differential_class[k],
new_rep.__class__)) from err
else:
raise
return new_diffs
def represent_as(self, other_class, differential_class=None):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via Cartesian coordinates.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. See the docstring for
:meth:`~astropy.coordinates.BaseRepresentationOrDifferential.to_cartesian`
for an example.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
Classes in which the differentials should be represented.
Can be a single class if only a single differential is attached,
otherwise it should be a `dict` keyed by the same keys as the
differentials.
"""
if other_class is self.__class__ and not differential_class:
return self.without_differentials()
else:
if isinstance(other_class, str):
raise ValueError("Input to a representation's represent_as "
"must be a class, not a string. For "
"strings, use frame objects")
if other_class is not self.__class__:
# The default is to convert via cartesian coordinates
new_rep = other_class.from_cartesian(self.to_cartesian())
else:
new_rep = self
new_rep._differentials = self._re_represent_differentials(
new_rep, differential_class)
return new_rep
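# Illustrative usage sketch (assumed values): since the classes differ, the
# conversion below routes through ``to_cartesian``/``from_cartesian``:
#
#     >>> sph = SphericalRepresentation(lon=30*u.deg, lat=0*u.deg,
#     ...                               distance=2*u.kpc)
#     >>> cart = sph.represent_as(CartesianRepresentation)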
def transform(self, matrix):
"""Transform coordinates using a 3x3 matrix in a Cartesian basis.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
"""
# route transformation through Cartesian
difs_cls = {k: CartesianDifferential for k in self.differentials.keys()}
crep = self.represent_as(CartesianRepresentation,
differential_class=difs_cls
).transform(matrix)
# move back to original representation
difs_cls = {k: diff.__class__ for k, diff in self.differentials.items()}
rep = crep.represent_as(self.__class__, difs_cls)
return rep
def with_differentials(self, differentials):
"""
Create a new representation with the same positions as this
representation, but with these new differentials.
Differential keys that already exist in this object's differential dict
are overwritten.
Parameters
----------
differentials : sequence of `~astropy.coordinates.BaseDifferential` subclass instance
The differentials for the new representation to have.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A copy of this representation, but with the ``differentials`` as
its differentials.
"""
if not differentials:
return self
args = [getattr(self, component) for component in self.components]
# We shallow copy the differentials dictionary so we don't update the
# current object's dictionary when adding new keys
new_rep = self.__class__(*args, differentials=self.differentials.copy(),
copy=False)
new_rep._differentials.update(
new_rep._validate_differentials(differentials))
return new_rep
def without_differentials(self):
"""Return a copy of the representation without attached differentials.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A shallow copy of this representation, without any differentials.
If no differentials were present, no copy is made.
"""
if not self._differentials:
return self
args = [getattr(self, component) for component in self.components]
return self.__class__(*args, copy=False)
@classmethod
def from_representation(cls, representation):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The presentation that should be converted to this class.
"""
return representation.represent_as(cls)
def __eq__(self, value):
"""Equality operator for BaseRepresentation
This implements strict equality and requires that the representation
classes are identical, the differentials are identical, and that the
representation data are exactly equal.
"""
# BaseRepresentationOrDifferential (checks classes and compares components)
out = super().__eq__(value)
# super() checks that the class is identical so can this even happen?
# (same class, different differentials ?)
if self._differentials.keys() != value._differentials.keys():
raise ValueError('cannot compare: objects must have same differentials')
for self_diff, value_diff in zip(self._differentials.values(),
value._differentials.values()):
out &= (self_diff == value_diff)
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation with ``method`` applied to the component
data.
This is not a simple inherit from ``BaseRepresentationOrDifferential``
because we need to call ``._apply()`` on any associated differential
classes.
See docstring for `BaseRepresentationOrDifferential._apply`.
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
rep = super()._apply(method, *args, **kwargs)
rep._differentials = {k: diff._apply(method, *args, **kwargs)
for k, diff in self._differentials.items()}
return rep
def __setitem__(self, item, value):
if not isinstance(value, BaseRepresentation):
raise TypeError(f'value must be a representation instance, '
f'not {type(value)}.')
if not (isinstance(value, self.__class__)
or len(value.attr_classes) == len(self.attr_classes)):
raise ValueError(
f'value must be representable as {self.__class__.__name__} '
f'without loss of information.')
diff_classes = {}
if self._differentials:
if self._differentials.keys() != value._differentials.keys():
raise ValueError('value must have the same differentials.')
for key, self_diff in self._differentials.items():
diff_classes[key] = self_diff_cls = self_diff.__class__
value_diff_cls = value._differentials[key].__class__
# Note: value_diff_cls is a class, so this must be a subclass check.
if not (issubclass(value_diff_cls, self_diff_cls)
or (len(value_diff_cls.attr_classes)
== len(self_diff_cls.attr_classes))):
raise ValueError(
f'value differential {key!r} must be representable as '
f'{self_diff.__class__.__name__} without loss of information.')
value = value.represent_as(self.__class__, diff_classes)
super().__setitem__(item, value)
for key, differential in self._differentials.items():
differential[item] = value._differentials[key]
def _scale_operation(self, op, *args):
"""Scale all non-angular components, leaving angular ones unchanged.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.)
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
results = []
for component, cls in self.attr_classes.items():
value = getattr(self, component)
if issubclass(cls, Angle):
results.append(value)
else:
results.append(op(value, *args))
# try/except catches anything that cannot initialize the class, such
# as operations that returned NotImplemented or a representation
# instead of a quantity (as would happen for, e.g., rep * rep).
try:
result = self.__class__(*results)
except Exception:
return NotImplemented
for key, differential in self.differentials.items():
diff_result = differential._scale_operation(op, *args, scaled_base=True)
result.differentials[key] = diff_result
return result
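# Illustrative sketch (assumed values): for a SphericalRepresentation,
# multiplication scales only the non-angular component,
#
#     >>> rep = SphericalRepresentation(30*u.deg, 10*u.deg, 2*u.kpc)
#     >>> (rep * 2).distance
#     <Distance 4. kpc>
#
# while ``lon`` and ``lat`` come back unchanged.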
def _combine_operation(self, op, other, reverse=False):
"""Combine two representation.
By default, operate on the cartesian representations of both.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.)
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self.from_cartesian(result)
# We need to override this setter to support differentials
@BaseRepresentationOrDifferential.shape.setter
def shape(self, shape):
orig_shape = self.shape
# See: https://stackoverflow.com/questions/3336767/ for an example
BaseRepresentationOrDifferential.shape.fset(self, shape)
# also try to perform shape-setting on any associated differentials
try:
for k in self.differentials:
self.differentials[k].shape = shape
except Exception:
BaseRepresentationOrDifferential.shape.fset(self, orig_shape)
for k in self.differentials:
self.differentials[k].shape = orig_shape
raise
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.sqrt(functools.reduce(
operator.add, (getattr(self, component)**2
for component, cls in self.attr_classes.items()
if not issubclass(cls, Angle))))
def mean(self, *args, **kwargs):
"""Vector mean.
Averaging is done by converting the representation to cartesian, and
taking the mean of the x, y, and z components. The result is converted
back to the same representation as the input.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
mean : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector mean, in the same representation as that of the input.
"""
self._raise_if_has_differentials('mean')
return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
Adding is done by converting the representation to cartesian, and
summing the x, y, and z components. The result is converted back to the
same representation as the input.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
sum : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector sum, in the same representation as that of the input.
"""
self._raise_if_has_differentials('sum')
return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs))
def dot(self, other):
"""Dot product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation`
The representation to take the dot product with.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of the
cartesian representations of ``self`` and ``other``.
"""
return self.to_cartesian().dot(other)
def cross(self, other):
"""Vector cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to the type of representation of ``self``.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.BaseRepresentation` subclass instance
With vectors perpendicular to both ``self`` and ``other``, in the
same type of representation as ``self``.
"""
self._raise_if_has_differentials('cross')
return self.from_cartesian(self.to_cartesian().cross(other))
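# Illustrative sketch (assumed values): because ``dot`` and ``cross`` route
# through Cartesian, they work between different representation types:
#
#     >>> a = CartesianRepresentation(1, 0, 0, unit=u.pc)
#     >>> b = SphericalRepresentation(90*u.deg, 0*u.deg, 1*u.pc)
#     >>> a.dot(b)      # ~0 pc2: the vectors are orthogonal
#     >>> a.cross(b)    # a CartesianRepresentation pointing along +z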
class CartesianRepresentation(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
have different shapes, they should be broadcastable. If not quantity,
``unit`` should be set. If only ``x`` is given, it is assumed that it
contains an array with the 3 coordinates stored along ``xyz_axis``.
unit : unit-like
If given, the coordinates will be converted to this unit (or taken to
be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`CartesianDifferential` instance, or a dictionary of
`CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {'x': u.Quantity,
'y': u.Quantity,
'z': u.Quantity}
_xyz = None
def __init__(self, x, y=None, z=None, unit=None, xyz_axis=None,
differentials=None, copy=True):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in 'OV':
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
elif (isinstance(x, CartesianRepresentation)
and unit is None and xyz_axis is None):
if differentials is None:
differentials = x._differentials
return super().__init__(x, differentials=differentials,
copy=copy)
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError("xyz_axis should only be set if x, y, and z are "
"in a single array passed in through x, "
"i.e., y and z should not be not given.")
if y is None or z is None:
raise ValueError("x, y, and z are required to instantiate {}"
.format(self.__class__.__name__))
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (self._x.unit.is_equivalent(self._y.unit) and
self._x.unit.is_equivalent(self._z.unit)):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
o = np.broadcast_to(0.*u.one, self.shape, subok=True)
return {
'x': CartesianRepresentation(l, o, o, copy=False),
'y': CartesianRepresentation(o, l, o, copy=False),
'z': CartesianRepresentation(o, o, l, copy=False)}
def scale_factors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return {'x': l, 'y': l, 'z': l}
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._x, self._y, self._z], axis=xyz_axis)
xyz = property(get_xyz)
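# Illustrative sketch (assumed values): a single (3, ...) array passed through
# ``x`` is kept, so ``xyz`` can return it as-is rather than re-stacking:
#
#     >>> arr = np.arange(6.).reshape(3, 2)
#     >>> rep = CartesianRepresentation(arr * u.kpc)
#     >>> rep.xyz is rep.get_xyz(xyz_axis=0)
#     True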
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : ndarray
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
# transformed representation
rep = self.__class__(p, xyz_axis=-1, copy=False)
# Handle differentials attached to this representation
new_diffs = {k: d.transform(matrix, self, rep)
for k, d in self.differentials.items()}
return rep.with_differentials(new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = ((self, other_c) if not reverse else
(other_c, self))
return self.__class__(*(op(getattr(first, component),
getattr(second, component))
for component in first.components))
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
# erfa pm: Modulus of p-vector.
return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('mean')
return self._apply('mean', *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('sum')
return self._apply('sum', *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
"""
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError("cannot only take dot product with another "
"representation, not a {} instance."
.format(type(other))) from err
# erfa pdp: p-vector inner (=scalar=dot) product.
return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1),
other_c.get_xyz(xyz_axis=-1))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials('cross')
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError("cannot only take cross product with another "
"representation, not a {} instance."
.format(type(other))) from err
# erfa pxp: p-vector outer (=vector=cross) product.
sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1),
other_c.get_xyz(xyz_axis=-1))
return self.__class__(sxo, xyz_axis=-1)
class UnitSphericalRepresentation(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle'] or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {'lon': Longitude,
'lat': Latitude}
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat=None, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@classproperty
def _compatible_differentials(cls):
return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
SphericalDifferential, SphericalCosLatDifferential,
RadialDifferential]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
'lon': CartesianRepresentation(-sinlon, coslon, 0., copy=False),
'lat': CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
coslat, copy=False)}
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1./u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return {'lon': sf_lon,
'lat': sf_lat}
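# Illustrative numerical check (assumed values) of the scale-factor relation:
# a change of 1 arcsec in ``lon`` at lat = 60 deg displaces the point by about
# cos(60 deg) * 1 arcsec, i.e. half the nominal angle:
#
#     >>> usph = UnitSphericalRepresentation(0*u.deg, 60*u.deg)
#     >>> usph.scale_factors()['lon'] * (1*u.arcsec).to(u.radian)
#
# which evaluates to roughly 2.42e-6 (dimensionless).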
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
p = erfa_ufunc.s2c(self.lon, self.lat)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa c2s: P-vector to [unit]spherical coordinates.
return cls(*erfa_ufunc.c2s(p), copy=False)
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO! for differential_class. This cannot (currently) be implemented
# like in the other Representations since `_re_represent_differentials`
# keeps differentials' unit keys, but this can result in a mismatch
# between the UnitSpherical expected key (e.g. "s") and that expected
# in the other class (here "s / m"). For more info, see PR #11467
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(phi=self.lon, theta=90 * u.deg - self.lat,
r=1.0, copy=False)
elif issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, distance=1.0,
copy=False)
return super().represent_as(other_class, differential_class)
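# Illustrative sketch of the shortcut above (assumed output form): the
# distance is set to dimensionless unity rather than a physical length,
#
#     >>> usph = UnitSphericalRepresentation(30*u.deg, 10*u.deg)
#     >>> usph.represent_as(SphericalRepresentation).distance
#     <Quantity 1.>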
def transform(self, matrix):
r"""Transform the unit-spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
Returns
-------
`UnitSphericalRepresentation` or `SphericalRepresentation`
If ``matrix`` is O(3) -- :math:`M \cdot M^T = I` -- like a rotation,
then the result is a `UnitSphericalRepresentation`.
All other matrices will change the distance, so the dimensional
representation is used instead.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat = erfa_ufunc.c2s(p)
rep = self.__class__(lon=lon, lat=lat)
# handle differentials
new_diffs = {k: d.transform(matrix, self, rep)
for k, d in self.differentials.items()}
rep = rep.with_differentials(new_diffs)
else: # switch to dimensional representation
rep = self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1,
differentials=self.differentials
).transform(matrix)
return rep
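# Illustrative sketch (assumed values): an O(3) matrix keeps the class, while
# anything that changes lengths switches to the dimensional representation:
#
#     >>> from astropy.coordinates.matrix_utilities import rotation_matrix
#     >>> usph = UnitSphericalRepresentation(30*u.deg, 10*u.deg)
#     >>> usph.transform(rotation_matrix(45*u.deg, 'z'))  # UnitSpherical...
#     >>> usph.transform(2 * np.eye(3))                   # Spherical...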
def _scale_operation(self, op, *args):
return self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1.,
differentials=self.differentials)._scale_operation(op, *args)
def __neg__(self):
if any(differential.base_representation is not self.__class__
for differential in self.differentials.values()):
return super().__neg__()
result = self.__class__(self.lon + 180. * u.deg, -self.lat, copy=False)
for key, differential in self.differentials.items():
new_comps = (op(getattr(differential, comp))
for op, comp in zip((operator.pos, operator.neg),
differential.components))
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity` ['dimensionless']
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('mean')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('sum')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs))
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials('cross')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other))
class RadialRepresentation(BaseRepresentation):
"""
Representation of the distance of points from the origin.
Note that this is mostly intended as an internal helper representation.
It can do little else but be used as a scale in multiplication.
Parameters
----------
distance : `~astropy.units.Quantity` ['length']
The distance of the point(s) from the origin.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
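    Examples
    --------
    An illustrative sketch of the intended use as a scale in multiplication
    (values are arbitrary):
    >>> import astropy.units as u
    >>> from astropy.coordinates import (CartesianRepresentation,
    ...                                  RadialRepresentation)
    >>> scale = RadialRepresentation(2 * u.kpc)
    >>> scaled = scale * CartesianRepresentation(1., 0., 0., unit=u.one)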
"""
attr_classes = {'distance': u.Quantity}
def __init__(self, distance, differentials=None, copy=True):
super().__init__(distance, differentials=differentials, copy=copy)
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
"""Cartesian unit vectors are undefined for radial representation."""
raise NotImplementedError('Cartesian unit vectors are undefined for '
'{} instances'.format(self.__class__))
def scale_factors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return {'distance': l}
def to_cartesian(self):
"""Cannot convert radial representation to cartesian."""
raise NotImplementedError('cannot convert {} instance to cartesian.'
.format(self.__class__))
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to radial coordinate.
"""
return cls(distance=cart.norm(), copy=False)
def __mul__(self, other):
if isinstance(other, BaseRepresentation):
return self.distance * other
else:
return super().__mul__(other)
def norm(self):
"""Vector norm.
Just the distance itself.
Returns
-------
        norm : `~astropy.units.Quantity` ['length']
            The distance, with the same shape as the representation.
"""
return self.distance
def _combine_operation(self, op, other, reverse=False):
return NotImplemented
def transform(self, matrix):
"""Radial representations cannot be transformed by a Cartesian matrix.
Parameters
----------
matrix : array-like
The transformation matrix in a Cartesian basis.
            Must be a multiplication, i.e., a scaled identity matrix (a
            diagonal matrix with identical elements). Must have shape
            (..., 3, 3), where the last two axes hold the 3x3 matrix and any
            leading axes must be compatible with the shape of this
            representation.
Raises
------
ValueError
If the matrix is not a multiplication.
"""
scl = matrix[..., 0, 0]
# check that the matrix is a scaled identity matrix on the last 2 axes.
if np.any(matrix != scl[..., np.newaxis, np.newaxis] * np.identity(3)):
raise ValueError("Radial representations can only be "
"transformed by a scaled identity matrix")
return self * scl
def _spherical_op_funcs(op, *args):
"""For given operator, return functions that adjust lon, lat, distance."""
if op is operator.neg:
return lambda x: x+180*u.deg, operator.neg, operator.pos
try:
scale_sign = np.sign(args[0])
except Exception:
        # np.sign failed, so fall back to applying the operator directly.
        # This should always work, even if it may yield a negative distance.
return operator.pos, operator.pos, lambda x: op(x, *args)
scale = abs(args[0])
return (lambda x: x + 180*u.deg*np.signbit(scale_sign),
lambda x: x * scale_sign,
lambda x: op(x, scale))
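# Illustrative sketch only: a hypothetical helper (not part of the public
# API) showing what `_spherical_op_funcs` returns for multiplication by -2:
# the longitude is flipped by 180 deg, the latitude sign is negated, and the
# distance is scaled by the absolute value.
def _example_spherical_op_funcs():
    lon_op, lat_op, dist_op = _spherical_op_funcs(operator.mul, -2.)
    assert lon_op(30 * u.deg) == 210 * u.deg   # longitude flipped by 180 deg
    assert lat_op(10 * u.deg) == -10 * u.deg   # latitude sign negated
    assert dist_op(1 * u.kpc) == 2 * u.kpc     # distance scaled by abs(-2)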
class SphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle']
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity` ['length']
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
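    Examples
    --------
    An illustrative sketch (values are arbitrary), attaching a velocity
    differential, which is keyed by the unit of the derivative ('s'):
    >>> import astropy.units as u
    >>> from astropy.coordinates import (SphericalRepresentation,
    ...                                  SphericalDifferential)
    >>> sph = SphericalRepresentation(
    ...     lon=30*u.deg, lat=10*u.deg, distance=2*u.kpc,
    ...     differentials=SphericalDifferential(
    ...         1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s))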
"""
attr_classes = {'lon': Longitude,
'lat': Latitude,
'distance': u.Quantity}
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat=None, distance=None, differentials=None,
copy=True):
super().__init__(lon, lat, distance, copy=copy,
differentials=differentials)
if (not isinstance(self._distance, Distance)
and self._distance.unit.physical_type == 'length'):
try:
self._distance = Distance(self._distance, copy=False)
except ValueError as e:
if e.args[0].startswith('distance must be >= 0'):
raise ValueError("Distance must be >= 0. To allow negative "
"distance values, you must explicitly pass"
" in a `Distance` object with the the "
"argument 'allow_negative=True'.") from e
else:
raise
@classproperty
def _compatible_differentials(cls):
return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
SphericalDifferential, SphericalCosLatDifferential,
RadialDifferential]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
'lon': CartesianRepresentation(-sinlon, coslon, 0., copy=False),
'lat': CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
coslat, copy=False),
'distance': CartesianRepresentation(coslat*coslon, coslat*sinlon,
sinlat, copy=False)}
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.*u.one, self.shape, subok=True)
return {'lon': sf_lon,
'lat': sf_lat,
'distance': sf_distance}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if inspect.isclass(other_class):
if issubclass(other_class, PhysicsSphericalRepresentation):
diffs = self._re_represent_differentials(other_class,
differential_class)
return other_class(phi=self.lon, theta=90 * u.deg - self.lat,
r=self.distance, differentials=diffs,
copy=False)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(other_class,
differential_class)
return other_class(lon=self.lon, lat=self.lat,
differentials=diffs, copy=False)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
# erfa s2p: Convert spherical polar coordinates to p-vector.
p = erfa_ufunc.s2p(self.lon, self.lat, d)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa p2s: P-vector to spherical polar coordinates.
return cls(*erfa_ufunc.p2s(p), copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
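        Examples
        --------
        A brief sketch, rotating about the z-axis (values are arbitrary):
        >>> import astropy.units as u
        >>> from astropy.coordinates.matrix_utilities import rotation_matrix
        >>> sph = SphericalRepresentation(30*u.deg, 10*u.deg, 1*u.kpc)
        >>> new = sph.transform(rotation_matrix(30*u.deg, 'z'))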
"""
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p)
rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur)
# handle differentials
new_diffs = {k: d.transform(matrix, self, rep)
for k, d in self.differentials.items()}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
def _scale_operation(self, op, *args):
# TODO: expand special-casing to UnitSpherical and RadialDifferential.
if any(differential.base_representation is not self.__class__
for differential in self.differentials.values()):
return super()._scale_operation(op, *args)
lon_op, lat_op, distance_op = _spherical_op_funcs(op, *args)
result = self.__class__(lon_op(self.lon), lat_op(self.lat),
distance_op(self.distance), copy=False)
for key, differential in self.differentials.items():
new_comps = (op(getattr(differential, comp)) for op, comp in zip(
(operator.pos, lat_op, distance_op),
differential.components))
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class PhysicsSphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle`. If ``copy`` is False,
        ``phi`` will be changed in place if it is not between 0 and 360 degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `PhysicsSphericalDifferential` instance, or a dictionary of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
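    Examples
    --------
    An illustrative sketch; note that ``theta`` is measured from the pole,
    i.e., it corresponds to ``90 deg - lat`` of `SphericalRepresentation`:
    >>> import astropy.units as u
    >>> from astropy.coordinates import PhysicsSphericalRepresentation
    >>> psph = PhysicsSphericalRepresentation(phi=30*u.deg, theta=80*u.deg,
    ...                                       r=1*u.kpc)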
"""
attr_classes = {'phi': Angle,
'theta': Angle,
'r': u.Quantity}
def __init__(self, phi, theta=None, r=None, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
# Note that _phi already holds our own copy if copy=True.
self._phi.wrap_at(360 * u.deg, inplace=True)
# This invalid catch block can be removed when the minimum numpy
# version is >= 1.19 (NUMPY_LT_1_19)
with np.errstate(invalid='ignore'):
if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
raise ValueError('Inclination angle(s) must be within '
'0 deg <= angle <= 180 deg, '
'got {}'.format(theta.to(u.degree)))
if self._r.unit.physical_type == 'length':
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
        The inclination (angle from the pole) of the point(s).
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
'phi': CartesianRepresentation(-sinphi, cosphi, 0., copy=False),
'theta': CartesianRepresentation(costheta*cosphi,
costheta*sinphi,
-sintheta, copy=False),
'r': CartesianRepresentation(sintheta*cosphi, sintheta*sinphi,
costheta, copy=False)}
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return {'phi': r * sintheta,
'theta': r,
'r': l}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if inspect.isclass(other_class):
if issubclass(other_class, SphericalRepresentation):
diffs = self._re_represent_differentials(other_class,
differential_class)
return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
distance=self.r, differentials=diffs,
copy=False)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(other_class,
differential_class)
return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
differentials=diffs, copy=False)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
# apply transformation in unit-spherical coordinates
xyz = erfa_ufunc.s2c(self.phi, 90*u.deg-self.theta)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p) # `ur` is transformed unit-`r`
# create transformed physics-spherical representation,
# reapplying the distance scaling
rep = self.__class__(phi=lon, theta=90*u.deg-lat, r=self.r * ur)
new_diffs = {k: d.transform(matrix, self, rep)
for k, d in self.differentials.items()}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
def _scale_operation(self, op, *args):
if any(differential.base_representation is not self.__class__
for differential in self.differentials.values()):
return super()._scale_operation(op, *args)
phi_op, adjust_theta_sign, r_op = _spherical_op_funcs(op, *args)
# Also run phi_op on theta to ensure theta remains between 0 and 180:
# any time the scale is negative, we do -theta + 180 degrees.
result = self.__class__(phi_op(self.phi),
phi_op(adjust_theta_sign(self.theta)),
r_op(self.r), copy=False)
for key, differential in self.differentials.items():
new_comps = (op(getattr(differential, comp)) for op, comp in zip(
(operator.pos, adjust_theta_sign, r_op),
differential.components))
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class CylindricalRepresentation(BaseRepresentation):
"""
Representation of points in 3D cylindrical coordinates.
Parameters
----------
rho : `~astropy.units.Quantity`
The distance from the z axis to the point(s).
phi : `~astropy.units.Quantity` or str
The azimuth of the point(s), in angular units, which will be wrapped
        to an angle between 0 and 360 degrees. This can also be an instance
        of `~astropy.coordinates.Angle`.
z : `~astropy.units.Quantity`
        The z coordinate(s) of the point(s).
differentials : dict, `CylindricalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `CylindricalDifferential` instance, or a dictionary of differential
instances with keys set to a string representation of the SI unit with
which the differential (derivative) is taken. For example, for a
velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
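    Examples
    --------
    An illustrative sketch (``rho`` and ``z`` must have equivalent units):
    >>> import astropy.units as u
    >>> from astropy.coordinates import CylindricalRepresentation
    >>> cyl = CylindricalRepresentation(rho=1*u.kpc, phi=30*u.deg, z=2*u.kpc)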
"""
attr_classes = {'rho': u.Quantity,
'phi': Angle,
'z': u.Quantity}
def __init__(self, rho, phi=None, z=None, differentials=None, copy=True):
super().__init__(rho, phi, z, copy=copy, differentials=differentials)
if not self._rho.unit.is_equivalent(self._z.unit):
raise u.UnitsError("rho and z should have matching physical types")
@property
def rho(self):
"""
The distance of the point(s) from the z-axis.
"""
return self._rho
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def z(self):
"""
The height of the point(s).
"""
return self._z
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
l = np.broadcast_to(1., self.shape)
return {
'rho': CartesianRepresentation(cosphi, sinphi, 0, copy=False),
'phi': CartesianRepresentation(-sinphi, cosphi, 0, copy=False),
'z': CartesianRepresentation(0, 0, l, unit=u.one, copy=False)}
def scale_factors(self):
rho = self.rho / u.radian
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return {'rho': l,
'phi': rho,
'z': l}
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to cylindrical polar
coordinates.
"""
rho = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
z = cart.z
return cls(rho=rho, phi=phi, z=z, copy=False)
def to_cartesian(self):
"""
Converts cylindrical polar coordinates to 3D rectangular cartesian
coordinates.
"""
x = self.rho * np.cos(self.phi)
y = self.rho * np.sin(self.phi)
z = self.z
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
def _scale_operation(self, op, *args):
if any(differential.base_representation is not self.__class__
for differential in self.differentials.values()):
return super()._scale_operation(op, *args)
phi_op, _, rho_op = _spherical_op_funcs(op, *args)
z_op = lambda x: op(x, *args)
result = self.__class__(rho_op(self.rho), phi_op(self.phi),
z_op(self.z), copy=False)
for key, differential in self.differentials.items():
new_comps = (op(getattr(differential, comp)) for op, comp in zip(
(rho_op, operator.pos, z_op), differential.components))
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class BaseDifferential(BaseRepresentationOrDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All differential representation classes should subclass this base class,
and define an ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
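    As an illustrative sketch, a differential class for a (hypothetical)
    representation class ``MyRepresentation`` needs no more than:
    >>> class MyDifferential(BaseDifferential):  # doctest: +SKIP
    ...     base_representation = MyRepresentation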
"""
def __init_subclass__(cls, **kwargs):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
# Don't do anything for base helper classes.
if cls.__name__ in ('BaseDifferential', 'BaseSphericalDifferential',
'BaseSphericalCosLatDifferential'):
return
if not hasattr(cls, 'base_representation'):
            raise NotImplementedError('Differential representations must have a '
                                      '"base_representation" class attribute.')
# If not defined explicitly, create attr_classes.
if not hasattr(cls, 'attr_classes'):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = {'d_' + c: u.Quantity
for c in base_attr_classes}
repr_name = cls.get_name()
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError(f"Differential class {repr_name} already defined")
DIFFERENTIAL_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(cls, component,
property(_make_getter(component),
doc=f"Component '{component}' of the Differential."))
super().__init_subclass__(**kwargs)
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError(f"Differential class {cls} is not compatible with the "
f"base (representation) class {base.__class__}")
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
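        For example, for a base whose ``distance`` is in kpc and a
        differential whose ``d_distance`` is in km/s, the unit ratio
        kpc / (km/s) decomposes to seconds (dropping the scale), so the
        returned key is ``'s'``.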
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, f'd_{name}', None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# This is quite a bit faster than using to_system() or going
# through Quantity()
d_unit_si = d_unit.decompose(u.si.bases)
d_unit_si._scale = 1 # remove the scale from the unit
return str(d_unit_si)
else:
raise RuntimeError("Invalid representation-differential units! This"
" likely happened because either the "
"representation or the associated differential "
"have non-standard units. Check that the input "
"positional data have positional units, and the "
"input velocity data have velocity units, or "
"are both dimensionless.")
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
`CartesianDifferential`
This object, converted.
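        Notes
        -----
        This computes the sum over the components ``c`` of
        ``d_c * scale_factors[c] * unit_vectors[c]``, i.e., the Cartesian
        vector this differential corresponds to at the given base points.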
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add, (getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)))
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
other
The object to convert into this differential.
base : `BaseRepresentation`
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Will be converted to ``cls.base_representation`` if needed.
Returns
-------
`BaseDifferential` subclass instance
A new differential object that is this class' type.
"""
base = base.represent_as(cls.base_representation)
base_e, base_sf = cls._get_base_vectors(base)
return cls(*(other.dot(e / base_sf[component])
for component, e in base_e.items()), copy=False)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation))
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# route transformation through Cartesian
cdiff = self.represent_as(CartesianDifferential, base=base
).transform(matrix)
# move back to original representation
diff = cdiff.represent_as(self.__class__, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
"""Scale all components.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
scaled_base : bool, optional
Whether the base was scaled the same way. This affects whether
differential components should be scaled. For instance, a differential
in longitude should not be scaled if its spherical base is scaled
in radius.
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(*[op(getattr(first, c), getattr(second, c))
for c in self.components])
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but Cartesian differentials or radial differentials.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
        # RadialDifferential overrides this function, so no need to handle it here.
if not isinstance(self, CartesianDifferential) and base is None:
raise ValueError("`base` must be provided to calculate the norm of a"
f" {type(self).__name__}")
return self.to_cartesian(base).norm()
class CartesianDifferential(BaseDifferential):
"""Differentials in of points in 3D cartesian coordinates.
Parameters
----------
d_x, d_y, d_z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``,
and ``d_z`` have different shapes, they should be broadcastable. If not
quantities, ``unit`` should be set. If only ``d_x`` is given, it is
assumed that it contains an array with the 3 coordinates stored along
``xyz_axis``.
unit : `~astropy.units.Unit` or str
If given, the differentials will be converted to this unit (or taken to
        be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
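    Examples
    --------
    An illustrative sketch (values are arbitrary):
    >>> import astropy.units as u
    >>> from astropy.coordinates import CartesianDifferential
    >>> diff = CartesianDifferential(1., 2., 3., unit=u.km/u.s)
    >>> d_xyz = diff.get_d_xyz(xyz_axis=-1)  # stacked (d_x, d_y, d_z)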
"""
base_representation = CartesianRepresentation
_d_xyz = None
def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None,
copy=True):
if d_y is None and d_z is None:
if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in 'OV':
# Short-cut for 3-D array input.
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._d_xyz = d_x
if xyz_axis:
d_x = np.moveaxis(d_x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._d_x, self._d_y, self._d_z = d_x
return
else:
d_x, d_y, d_z = d_x
if xyz_axis is not None:
raise ValueError("xyz_axis should only be set if d_x, d_y, and d_z "
"are in a single array passed in through d_x, "
"i.e., d_y and d_z should not be not given.")
if d_y is None or d_z is None:
raise ValueError("d_x, d_y, and d_z are required to instantiate {}"
.format(self.__class__.__name__))
if unit is not None:
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
copy = False
super().__init__(d_x, d_y, d_z, copy=copy)
if not (self._d_x.unit.is_equivalent(self._d_y.unit) and
self._d_x.unit.is_equivalent(self._d_z.unit)):
raise u.UnitsError('d_x, d_y and d_z should have equivalent units.')
def to_cartesian(self, base=None):
return CartesianRepresentation(*[getattr(self, c) for c
in self.components])
@classmethod
def from_cartesian(cls, other, base=None):
return cls(*[getattr(other, c) for c in other.components])
def transform(self, matrix, base=None, transformed_base=None):
"""Transform differentials using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base, transformed_base : `~astropy.coordinates.CartesianRepresentation` or None, optional
Not used in the Cartesian transformation.
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_d_xyz(xyz_axis=-1))
return self.__class__(p, xyz_axis=-1, copy=False)
def get_d_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
d_xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._d_xyz is not None:
if self._xyz_axis == xyz_axis:
return self._d_xyz
else:
return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _d_xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._d_x, self._d_y, self._d_z], axis=xyz_axis)
d_xyz = property(get_d_xyz)
class BaseSphericalDifferential(BaseDifferential):
def _d_lon_coslat(self, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon * np.cos(base.lat)
@classmethod
def _get_d_lon(cls, d_lon_coslat, base):
"""Convert longitude differential d_lon_coslat to d_lon.
Parameters
----------
d_lon_coslat : `~astropy.units.Quantity`
Longitude differential that includes ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon_coslat / np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
        appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (isinstance(other, BaseSphericalDifferential) and
not isinstance(self, type(other)) or
isinstance(other, RadialDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The longitude and latitude of the differentials.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
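    Examples
    --------
    An illustrative sketch, using proper-motion-like units:
    >>> import astropy.units as u
    >>> from astropy.coordinates import UnitSphericalDifferential
    >>> dsph = UnitSphericalDifferential(1*u.mas/u.yr, 2*u.mas/u.yr)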
"""
base_representation = UnitSphericalRepresentation
@classproperty
def _dimensional_differential(cls):
return SphericalDifferential
def __init__(self, d_lon, d_lat=None, copy=True):
super().__init__(d_lon, d_lat, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon and d_lat should have equivalent units.')
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, representation.d_lat)
elif isinstance(representation, (SphericalCosLatDifferential,
UnitSphericalCosLatDifferential)):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
        # The transformation matrix does not need to be a rotation matrix,
        # so unit distance is not guaranteed. For speed, we check whether the
        # matrix is in O(3), i.e., whether it preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
# some of this can be moved to the parent class.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lon.unit / base.lon.unit # derivative unit
diff = self._dimensional_differential(
d_lon=self.d_lon, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
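    Examples
    --------
    An illustrative sketch (values are arbitrary):
    >>> import astropy.units as u
    >>> from astropy.coordinates import SphericalDifferential
    >>> dsph = SphericalDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)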
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalDifferential
def __init__(self, d_lon, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon, d_lat, d_distance, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon and d_lat should have equivalent units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_lon, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat,
self.d_distance)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self.d_lon, -self.d_lat, self.d_distance)
else:
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta,
representation.d_r)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_lon, self.d_lat, op(self.d_distance, *args))
else:
return super()._scale_operation(op, *args)
class BaseSphericalCosLatDifferential(BaseDifferential):
"""Differentials from points on a spherical base representation.
With cos(lat) assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
        appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (isinstance(other, BaseSphericalCosLatDifferential) and
not isinstance(self, type(other)) or
isinstance(other, RadialDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The longitude and latitude of the differentials.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
attr_classes = {'d_lon_coslat': u.Quantity,
'd_lat': u.Quantity}
@classproperty
def _dimensional_differential(cls):
return SphericalCosLatDifferential
def __init__(self, d_lon_coslat, d_lat=None, copy=True):
super().__init__(d_lon_coslat, d_lat, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
'units.')
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
        # All spherical differentials can be done without going to Cartesian,
        # though conversion from the forms without cos(lat) needs the base
        # for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
return cls(representation.d_lon_coslat, representation.d_lat)
elif isinstance(representation, (SphericalDifferential,
UnitSphericalDifferential)):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
        # The transformation matrix does not need to be a rotation matrix,
        # so unit distance is not guaranteed. For speed, we check whether the
        # matrix is in O(3), i.e., whether it preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lat.unit / base.lat.unit # derivative unit
diff = self._dimensional_differential(
d_lon_coslat=self.d_lon_coslat, d_lat=self.d_lat,
d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
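    Examples
    --------
    An illustrative sketch; ``d_lon_coslat`` already includes the cos(lat)
    factor:
    >>> import astropy.units as u
    >>> from astropy.coordinates import SphericalCosLatDifferential
    >>> dsph = SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr,
    ...                                    3*u.km/u.s)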
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = {'d_lon_coslat': u.Quantity,
'd_lat': u.Quantity,
'd_distance': u.Quantity}
def __init__(self, d_lon_coslat, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
'units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat,
representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta,
representation.d_r)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_lon_coslat, self.d_lat, op(self.d_distance, *args))
else:
return super()._scale_operation(op, *args)
class RadialDifferential(BaseDifferential):
"""Differential(s) of radial distances.
Parameters
----------
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
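    Examples
    --------
    An illustrative sketch (a radial-velocity-like quantity):
    >>> import astropy.units as u
    >>> from astropy.coordinates import RadialDifferential
    >>> rv = RadialDifferential(10*u.km/u.s)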
"""
base_representation = RadialRepresentation
def to_cartesian(self, base):
return self.d_distance * base.represent_as(
UnitSphericalRepresentation).to_cartesian()
def norm(self, base=None):
return self.d_distance
@classmethod
def from_cartesian(cls, other, base):
return cls(other.dot(base.represent_as(UnitSphericalRepresentation)),
copy=False)
@classmethod
def from_representation(cls, representation, base=None):
if isinstance(representation, (SphericalDifferential,
SphericalCosLatDifferential)):
return cls(representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_r)
else:
return super().from_representation(representation, base)
def _combine_operation(self, op, other, reverse=False):
if isinstance(other, self.base_representation):
if reverse:
first, second = other.distance, self.d_distance
else:
first, second = self.d_distance, other.distance
return other.__class__(op(first, second), copy=False)
elif isinstance(other, (BaseSphericalDifferential,
BaseSphericalCosLatDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalDifferential(**result_args)
else:
return super()._combine_operation(op, other, reverse)
class PhysicsSphericalDifferential(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
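    Examples
    --------
    An illustrative sketch (values are arbitrary):
    >>> import astropy.units as u
    >>> from astropy.coordinates import PhysicsSphericalDifferential
    >>> dpsph = PhysicsSphericalDifferential(1*u.mas/u.yr, 2*u.mas/u.yr,
    ...                                      3*u.km/u.s)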
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta=None, d_r=None, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError('d_phi and d_theta should have equivalent '
'units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, -representation.d_lat,
representation.d_distance)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_phi, self.d_theta, op(self.d_r, *args))
else:
return super()._scale_operation(op, *args)
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
Parameters
----------
d_rho : `~astropy.units.Quantity` ['speed']
The differential cylindrical radius.
d_phi : `~astropy.units.Quantity` ['angular speed']
The differential azimuth.
d_z : `~astropy.units.Quantity` ['speed']
The differential height.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
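    Examples
    --------
    An illustrative sketch (``d_rho`` and ``d_z`` must have equivalent units):
    >>> import astropy.units as u
    >>> from astropy.coordinates import CylindricalDifferential
    >>> dcyl = CylindricalDifferential(1*u.km/u.s, 2*u.mas/u.yr, 3*u.km/u.s)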
"""
base_representation = CylindricalRepresentation
def __init__(self, d_rho, d_phi=None, d_z=None, copy=False):
super().__init__(d_rho, d_phi, d_z, copy=copy)
if not self._d_rho.unit.is_equivalent(self._d_z.unit):
raise u.UnitsError("d_rho and d_z should have equivalent units.")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains utility functions for working with angles. These are both
used internally in astropy.coordinates.angles, and of possible use externally.
"""
__all__ = ['angular_separation', 'position_angle', 'offset_by',
'golden_spiral_grid', 'uniform_spherical_random_surface',
'uniform_spherical_random_volume']
# Third-party
import numpy as np
# Astropy
import astropy.units as u
from astropy.coordinates.representation import (
UnitSphericalRepresentation,
SphericalRepresentation)
def angular_separation(lon1, lat1, lon2, lat2):
"""
Angular separation between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
angular separation : `~astropy.units.Quantity` ['angle'] or float
Type depends on input; ``Quantity`` in angular units, or float in
radians.
Notes
-----
The angular separation is calculated using the Vincenty formula [1]_,
which is slightly more complex and computationally expensive than
    some alternatives, but is stable at all distances, including the
poles and antipodes.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
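    Examples
    --------
    As a quick illustrative check, two points on the equator separated by 90
    degrees of longitude are pi/2 radians apart:

    >>> import astropy.units as u
    >>> angular_separation(0*u.deg, 0*u.deg, 90*u.deg, 0*u.deg)  # doctest: +FLOAT_CMP
    <Quantity 1.57079633 rad>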
"""
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.arctan2(np.hypot(num1, num2), denominator)
def position_angle(lon1, lat1, lon2, lat2):
"""
Position Angle (East of North) between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from position 1 to
position 2. If any of the angles are arrays, this will contain an array
following the appropriate `numpy` broadcasting rules.
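    Examples
    --------
    As an illustrative check, a point due east of another has a position
    angle of 90 degrees:

    >>> import astropy.units as u
    >>> position_angle(0*u.deg, 0*u.deg, 1*u.deg, 0*u.deg).to(u.deg)  # doctest: +FLOAT_CMP
    <Angle 90. deg>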
"""
from .angles import Angle
deltalon = lon2 - lon1
colat = np.cos(lat2)
x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
y = np.sin(deltalon) * colat
return Angle(np.arctan2(y, x), u.radian).wrap_at(360*u.deg)
def offset_by(lon, lat, posang, distance):
"""
Point with the given offset from the given point.
Parameters
----------
lon, lat, posang, distance : `~astropy.coordinates.Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the starting point,
position angle and distance to the final point.
Quantities should be in angular units; floats in radians.
    Polar points at lat = +/-90 deg are treated as the limit of
    +/-(90 - epsilon) deg at the same lon.
Returns
-------
lon, lat : `~astropy.coordinates.Angle`
The position of the final point. If any of the angles are arrays,
these will contain arrays following the appropriate `numpy` broadcasting rules.
0 <= lon < 2pi.
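    Examples
    --------
    As an illustrative check, offsetting due north (``posang=0``) from a
    point on the equator simply increments the latitude:

    >>> import astropy.units as u
    >>> offset_by(10*u.deg, 0*u.deg, 0*u.deg, 10*u.deg)  # doctest: +FLOAT_CMP
    (<Angle 10. deg>, <Angle 10. deg>)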
"""
from .angles import Angle
# Calculations are done using the spherical trigonometry sine and cosine rules
# of the triangle A at North Pole, B at starting point, C at final point
# with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang)
# with sides a (distance), b (final co-latitude), c (starting colatitude)
# B, a, c are knowns; A and b are unknowns
# https://en.wikipedia.org/wiki/Spherical_trigonometry
cos_a = np.cos(distance)
sin_a = np.sin(distance)
cos_c = np.sin(lat)
sin_c = np.cos(lat)
cos_B = np.cos(posang)
sin_B = np.sin(posang)
# cosine rule: Know two sides: a,c and included angle: B; get unknown side b
cos_b = cos_c * cos_a + sin_c * sin_a * cos_B
# sin_b = np.sqrt(1 - cos_b**2)
# sine rule and cosine rule for A (using both lets arctan2 pick quadrant).
# multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors
# at poles. Correct for the x=0 multiplication a few lines down.
# sin_A/sin_a == sin_B/sin_b # Sine rule
xsin_A = sin_a * sin_B * sin_c
# cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule
xcos_A = cos_a - cos_b * cos_c
A = Angle(np.arctan2(xsin_A, xcos_A), u.radian)
# Treat the poles as if they are infinitesimally far from pole but at given lon
small_sin_c = sin_c < 1e-12
if small_sin_c.any():
# For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang
A_pole = (90*u.deg + cos_c*(90*u.deg-Angle(posang, u.radian))).to(u.rad)
if A.shape:
# broadcast to ensure the shape is like that of A, which is also
# affected by the (possible) shapes of lat, posang, and distance.
small_sin_c = np.broadcast_to(small_sin_c, A.shape)
A[small_sin_c] = A_pole[small_sin_c]
else:
A = A_pole
outlon = (Angle(lon, u.radian) + A).wrap_at(360.0*u.deg).to(u.deg)
outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg)
return outlon, outlat
def golden_spiral_grid(size):
"""Generate a grid of points on the surface of the unit sphere using the
Fibonacci or Golden Spiral method.
.. seealso::
`Evenly distributing points on a sphere <https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere>`_
Parameters
----------
size : int
The number of points to generate.
Returns
-------
rep : `~astropy.coordinates.UnitSphericalRepresentation`
The grid of points.
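    Examples
    --------
    A short usage sketch; the returned representation holds one point per
    requested grid position:

    >>> rep = golden_spiral_grid(100)
    >>> rep.shape
    (100,)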
"""
golden_r = (1 + 5**0.5) / 2
grid = np.arange(0, size, dtype=float) + 0.5
lon = 2*np.pi / golden_r * grid * u.rad
lat = np.arcsin(1 - 2 * grid / size) * u.rad
return UnitSphericalRepresentation(lon, lat)
def uniform_spherical_random_surface(size=1):
"""Generate a random sampling of points on the surface of the unit sphere.
Parameters
----------
size : int
The number of points to generate.
Returns
-------
rep : `~astropy.coordinates.UnitSphericalRepresentation`
The random points.
"""
rng = np.random # can maybe switch to this being an input later - see #11628
lon = rng.uniform(0, 2*np.pi, size) * u.rad
lat = np.arcsin(rng.uniform(-1, 1, size=size)) * u.rad
return UnitSphericalRepresentation(lon, lat)
def uniform_spherical_random_volume(size=1, max_radius=1):
"""Generate a random sampling of points that follow a uniform volume
density distribution within a sphere.
Parameters
----------
size : int
The number of points to generate.
max_radius : number, quantity-like, optional
A dimensionless or unit-ful factor to scale the random distances.
Returns
-------
rep : `~astropy.coordinates.SphericalRepresentation`
The random points.
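    Examples
    --------
    A short usage sketch; ``max_radius`` may carry a unit, in which case the
    returned distances do too:

    >>> import astropy.units as u
    >>> pts = uniform_spherical_random_volume(size=100, max_radius=10*u.pc)
    >>> pts.shape
    (100,)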
"""
rng = np.random # can maybe switch to this being an input later - see #11628
usph = uniform_spherical_random_surface(size=size)
r = np.cbrt(rng.uniform(size=size)) * u.Quantity(max_radius, copy=False)
return SphericalRepresentation(usph.lon, usph.lat, r)
# below here can be deleted in v5.0
from astropy.utils.decorators import deprecated
from astropy.coordinates import angle_formats
__old_angle_utilities_funcs = ['check_hms_ranges', 'degrees_to_dms',
'degrees_to_string', 'dms_to_degrees',
'format_exception', 'hms_to_degrees',
'hms_to_dms', 'hms_to_hours',
'hms_to_radians', 'hours_to_decimal',
'hours_to_hms', 'hours_to_radians',
'hours_to_string', 'parse_angle',
'radians_to_degrees', 'radians_to_dms',
'radians_to_hms', 'radians_to_hours',
'sexagesimal_to_string']
for funcname in __old_angle_utilities_funcs:
vars()[funcname] = deprecated(name='astropy.coordinates.angle_utilities.' + funcname,
alternative='astropy.coordinates.angle_formats.' + funcname,
since='v4.3')(getattr(angle_formats, funcname))
|
f9b4014cc3fe0bbd048fab23e4832b576094bcb26adc62bd1b4f3b558a1dde20 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from warnings import warn
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import suppress, contextmanager
from inspect import signature
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
from .matrix_utilities import matrix_product
__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform',
'BaseAffineTransform', 'AffineTransform',
'StaticMatrixTransform', 'DynamicMatrixTransform',
'FunctionTransformWithFiniteDifference', 'CompositeTransform']
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
    A `set` of all component names ever defined within any frame class in
this `TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, 'name', None)
if nm is not None:
if not isinstance(nm, list):
nm = [nm]
for name in nm:
dct[name] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this `TransformGraph`.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
        A `set` of all component names ever defined within any frame class in
this `TransformGraph`.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""
Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : `CoordinateTransform`
The transformation object. Typically a `CoordinateTransform` object,
although it may be some other callable that is called with the same
signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
if not callable(transform):
raise TypeError('transform must be callable')
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError("Frame(s) {} contain invalid attribute names: {}"
"\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
.format(list(invalid_frames), invalid_attrs))
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or None
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or None
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or None
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
                raise ValueError('fromsys and tosys must both be None if either is')
if transform is None:
raise ValueError('cannot give all Nones to remove_transform')
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if agraph[b] is transform:
del agraph[b]
fromsys = a
break
# If the transform was found, need to break out of the outer for loop too
if fromsys:
break
else:
raise ValueError(f'Could not find transform {transform} in the graph')
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError('Current transform from {} to {} is not '
'{}'.format(fromsys, tosys, transform))
# Remove the subgraph if it is now empty
if self._graph[fromsys] == {}:
self._graph.pop(fromsys)
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of class or None
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : float or int
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float('inf')
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1)
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
                if n2 not in result:  # not yet visited
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError('n2 not in heap - this should be impossible!')
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""
Generates and returns the `CompositeTransform` for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `CompositeTransform` or None
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
This function always returns a `CompositeTransform`, because
`CompositeTransform` is slightly more adaptable in the way it can be
called than other transform classes. Specifically, it takes care of
intermediate steps of transformations in a way that is consistent with
1-hop transformations.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys is not a class')
if not inspect.isclass(tosys):
raise TypeError('tosys is not a class')
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(transforms, fromsys, tosys,
register_graph=False)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
`BaseCoordinateFrame` subclass
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
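        Examples
        --------
        For instance, looking up the built-in ICRS frame by its alias (the
        built-in frames are registered on import of ``astropy.coordinates``):

        >>> from astropy.coordinates import frame_transform_graph
        >>> frame_transform_graph.lookup_name('icrs')
        <class 'astropy.coordinates.builtin_frames.icrs.ICRS'>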
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,
savelayout='plain', saveformat=None, color_edges=True):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
            they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : None or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
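        Examples
        --------
        The generated string always begins with the graph declaration, so a
        quick check is:

        >>> from astropy.coordinates import frame_transform_graph
        >>> dot = frame_transform_graph.to_dot_graph()
        >>> dot.splitlines()[0]
        'digraph AstropyCoordinateTransformGraph {'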
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = {f: [k for k, v in self._cached_names.items() if v == f]
for f in self.frame_set}
for n in nodes:
if n in invclsaliases:
aliases = '`\\n`'.join(invclsaliases[n])
nodenames.append('{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases))
else:
nodenames.append(n.__name__ + '[ shape=oval ]')
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, 'priority') else 1
color = trans_to_color[transform.__class__] if color_edges else 'black'
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ['digraph AstropyCoordinateTransformGraph {']
lines.append('graph [rankdir=LR]')
lines.append('; '.join(nodenames) + ';')
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = '[ {0} {1} ]'
if priorities:
priority_part = f'label = "{weights}"'
else:
priority_part = ''
color_part = f'color = "{color}"'
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append(f'{enm1} -> {enm2}{labelstr};')
lines.append('')
lines.append('overlap=false')
lines.append('}')
dotgraph = '\n'.join(lines)
if savefn is not None:
if savelayout == 'plain':
with open(savefn, 'w') as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append('-T' + saveformat)
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
                # the subprocess pipes operate on bytes, so encode the input
                # and decode any error output explicitly
                stdout, stderr = proc.communicate(dotgraph.encode('utf-8'))
                if proc.returncode != 0:
                    raise OSError('problem running graphviz: \n' +
                                  stderr.decode('utf-8'))
                with open(savefn, 'wb') as f:
                    f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <https://networkx.github.io/>`_
package installed for this to work.
Returns
-------
nxgraph : ``networkx.Graph``
This `TransformGraph` as a `networkx.Graph <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, 'priority') else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""
A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
            The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third
are ``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use `add_transform` instead of
using this decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(func, fromsys, tosys, priority=priority,
register_graph=self, **kwargs)
return func
return deco
def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1):
"""
Add a single-step transform that encapsulates a multi-step transformation path,
using the transforms that already exist in the graph.
The created transform internally calls the existing transforms. If all of the
transforms are affine, the merged transform is
`~astropy.coordinates.transformations.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.transformations.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`.
This method is primarily useful for defining loopback transformations
(i.e., where ``fromsys`` and the final ``tosys`` are the same).
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform to.
*furthersys : class
Additional coordinate frame classes to transform to in order.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Notes
-----
Even though the created transform is a single step in the graph, it
will still internally call the constituent transforms. Thus, there is
no performance benefit for using this created transform.
For Astropy's built-in frames, loopback transformations typically use
        `~astropy.coordinates.ICRS` to be safe. Transforming through an inertial
frame ensures that changes in observation time and observer
location/velocity are properly accounted for.
An error will be raised if a direct transform between ``fromsys`` and
        ``tosys`` already exists.
"""
frames = [fromsys, tosys, *furthersys]
lastsys = frames[-1]
full_path = self.get_transform(fromsys, lastsys)
transforms = [self.get_transform(frame_a, frame_b)
for frame_a, frame_b in zip(frames[:-1], frames[1:])]
if None in transforms:
raise ValueError(f"This transformation path is not possible")
if len(full_path.transforms) == 1:
raise ValueError(f"A direct transform for {fromsys.__name__}->{lastsys.__name__} already exists")
self.add_transform(fromsys, lastsys,
CompositeTransform(transforms, fromsys, lastsys,
priority=priority)._as_single_transform())
@contextmanager
def impose_finite_difference_dt(self, dt):
"""
        Context manager to impose a finite-difference time step on all applicable transformations.
For each transformation in this transformation graph that has the attribute
``finite_difference_dt``, that attribute is set to the provided value. The only standard
transformation with this attribute is
`~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`.
Parameters
----------
dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the finite difference.
If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value.
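        Examples
        --------
        A usage sketch; any quantity with time units (or a suitable callable)
        works:

        >>> import astropy.units as u
        >>> from astropy.coordinates import frame_transform_graph
        >>> with frame_transform_graph.impose_finite_difference_dt(1*u.year):
        ...     pass  # transformations performed here use dt = 1 year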
"""
key = 'finite_difference_dt'
saved_settings = []
try:
for to_frames in self._graph.values():
for transform in to_frames.values():
if hasattr(transform, key):
old_setting = (transform, key, getattr(transform, key))
saved_settings.append(old_setting)
setattr(transform, key, dt)
yield
finally:
for setting in saved_settings:
setattr(*setting)
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to start from.
tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError('fromsys and tosys must be classes')
self.overlapping_frame_attr_names = overlap = []
if (hasattr(fromsys, 'get_frame_attr_names') and
hasattr(tosys, 'get_frame_attr_names')):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.frame_attributes.keys():
if from_nm in tosys.frame_attributes.keys():
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : `TransformGraph` object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary that ``tosys.get_frame_attr_names()``
returns. Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : `BaseCoordinateFrame` subclass instance
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
        ``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError('func must be callable')
with suppress(TypeError):
sig = signature(func)
kinds = [x.kind for x in sig.parameters.values()]
            # count the parameters that can be filled positionally
            n_positional = len([kind for kind in kinds
                                if kind in (inspect.Parameter.POSITIONAL_ONLY,
                                            inspect.Parameter.POSITIONAL_OR_KEYWORD)])
            if (n_positional != 2 and
                    inspect.Parameter.VAR_POSITIONAL not in kinds):
                raise ValueError('provided function does not accept two arguments')
self.func = func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError(f'the transformation function yielded {res} but '
f'should have been of type {self.tosys}')
if fromcoord.data.differentials and not res.data.differentials:
warn("Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.", AstropyWarning)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""
A coordinate transformation that works like a `FunctionTransform`, but
computes velocity shifts based on the finite-difference relative to one of
the frame attributes. Note that the transform function should *not* change
the differential at all in this case, as any differentials will be
overridden.
When a differential is in the from coordinate, the finite difference
    calculation has two components. The first is simply the existing
    differential, re-oriented (using finite-difference techniques) to
    point in the direction the velocity vector has in the *new* frame. The
    second component is the "induced" velocity: the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
If True, the finite difference is computed as
        :math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
        if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The former
        (centered) difference is more numerically stable; the latter requires
        one fewer evaluation of the transform per call, so it is slightly
        faster.
All other parameters are identical to the initializer for
`FunctionTransform`.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None,
finite_difference_frameattr_name='obstime',
finite_difference_dt=1*u.second,
symmetric_finite_difference=True):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError('Frame attribute name {} is not a frame '
'attribute of {} or {}'.format(value,
self.fromsys,
self.tosys))
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import (CartesianRepresentation,
CartesianDifferential)
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt/2
from_diffless = fromcoord.realize_frame(fromcoord.data.without_differentials())
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (fromcoord_cart.xyz +
fromcoord_cart.differentials['s'].d_xyz*halfdt)
fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
backxyz = (fromcoord_cart.xyz -
fromcoord_cart.differentials['s'].d_xyz*halfdt)
back = supcall(fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe)
else:
fwdxyz = (fromcoord_cart.xyz +
fromcoord_cart.differentials['s'].d_xyz*dt)
fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(newdiff)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because ``AffineTransform`` and the matrix
transform classes share the ``__call__()`` method, but differ in how they
generate the affine parameters. ``StaticMatrixTransform`` passes in a
matrix stored as a class attribute, and both of the matrix transforms pass
in ``None`` for the offset. Hence, user subclasses would likely want to
subclass this (rather than ``AffineTransform``) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (UnitSphericalRepresentation,
CartesianDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential)
data = fromcoord.data
has_velocity = 's' in data.differentials
# Bail out if no transform is actually requested
if matrix is None and offset is None:
return data
# list of unit differentials
_unit_diffs = (SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential)
unit_vel_diff = (has_velocity and
isinstance(data.differentials['s'], _unit_diffs))
rad_vel_diff = (has_velocity and
isinstance(data.differentials['s'], RadialDifferential))
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError("Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {})"
.format(data.__class__))
elif (has_velocity and (unit_vel_diff or rad_vel_diff) and
offset is not None and 's' in offset.differentials):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError("Velocity information stored on coordinate frame "
"is insufficient to do a full-space velocity "
"transformation (differential class: {})"
.format(data.differentials['s'].__class__))
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError("Representation passed to AffineTransform contains"
" multiple associated differentials. Only a single"
" differential with velocity units is presently"
" supported (differentials: {})."
.format(str(data.differentials)))
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (has_velocity and isinstance(data, UnitSphericalRepresentation) and
not unit_vel_diff and not rad_vel_diff):
# retrieve just velocity differential
unit_diff = data.differentials['s'].represent_as(
data.differentials['s']._unit_differential, data)
data = data.with_differentials({'s': unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = {k: diff.represent_as(CartesianDifferential, data)
for k, diff in data.differentials.items()}
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = (rep.without_differentials() +
offset.without_differentials())
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials['s'] # already in Cartesian form
if offset is not None and 's' in offset.differentials:
veldiff = veldiff + offset.differentials['s']
newrep = newrep.with_differentials({'s': veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials['s']
_unit_cls = fromcoord.data.differentials['s']._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = {comp: getattr(newdiff, comp) for comp in newdiff.components}
kwargs['d_distance'] = fromcoord.data.differentials['s'].d_distance
diffs = {'s': fromcoord.data.differentials['s'].__class__(
copy=False, **kwargs)}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials['s'].represent_as(
fromcoord.data.differentials['s'].__class__, newrep)
diffs = {'s': newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials['s'].__class__
newrep = newrep.represent_as(fromcoord.data.__class__,
diff_cls._dimensional_differential)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials(
{'s': fromcoord.data.differentials['s']})
return newrep
def __call__(self, fromcoord, toframe):
params = self._affine_params(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, *params)
return toframe.realize_frame(newrep)
@abstractmethod
def _affine_params(self, fromcoord, toframe):
pass
class AffineTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a 3 x 3
cartesian transformation matrix and a tuple of displacement vectors.
See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
"""
def __init__(self, transform_func, fromsys, tosys, priority=1,
register_graph=None):
if not callable(transform_func):
raise TypeError('transform_func is not callable')
self.transform_func = transform_func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def _affine_params(self, fromcoord, toframe):
return self.transform_func(fromcoord, toframe)
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
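    Examples
    --------
    A registration sketch using
    `~astropy.coordinates.matrix_utilities.rotation_matrix` (here ``Frame1``
    and ``Frame2`` are placeholders for actual frame classes)::

        StaticMatrixTransform(rotation_matrix(30*u.deg, 'z'), Frame1, Frame2)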
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError('Provided matrix is not 3 x 3')
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def _affine_params(self, fromcoord, toframe):
return self.matrix, None
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1,
register_graph=None):
if not callable(matrix_func):
raise TypeError('matrix_func is not callable')
self.matrix_func = matrix_func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def _affine_params(self, fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
transforms : sequence of `CoordinateTransform` object
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `StaticMatrixTransform` will be collapsed into a
single transformation to speed up the calculation.
"""
def __init__(self, transforms, fromsys, tosys, priority=1,
register_graph=None, collapse_static_mats=True):
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
Combines together sequences of `StaticMatrixTransform`s into a single
transform and returns it.
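        For example, a sequence ``[S1, S2, F, S3, S4]`` (with ``S*`` static
        matrix transforms and ``F`` any other transform) collapses to three
        steps: the matrix product of ``S1`` and ``S2``, then ``F``, then the
        product of ``S3`` and ``S4``.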
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if (isinstance(lasttrans, StaticMatrixTransform) and
isinstance(currtrans, StaticMatrixTransform)):
combinedmat = matrix_product(currtrans.matrix, lasttrans.matrix)
newtrans[-1] = StaticMatrixTransform(combinedmat,
lasttrans.fromsys,
currtrans.tosys)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
# build an intermediate frame with attributes taken from either
# `toframe`, or if not there, `fromcoord`, or if not there, use
# the defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.get_frame_attr_names():
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
# coordinate objects are immutable, so copying is not needed
return curr_coord
def _as_single_transform(self):
"""
Return an encapsulated version of the composite transform so that it appears to
be a single transform.
The returned transform internally calls the constituent transforms. If all of
the transforms are affine, the merged transform is
`~astropy.coordinates.transformations.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.transformations.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.transformations.FunctionTransformWithFiniteDifference`.
"""
# Create a list of the transforms including flattening any constituent CompositeTransform
transforms = [t if not isinstance(t, CompositeTransform) else t._as_single_transform()
for t in self.transforms]
if all([isinstance(t, BaseAffineTransform) for t in transforms]):
# Check if there may be an origin shift
fixed_origin = all([isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform))
for t in transforms])
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return None if fixed_origin else (None, None)
# Create a merged attribute dictionary for any intermediate frames
# For any attributes shared by the "from"/"to" frames, the "to" frame takes
# precedence because this is the same choice implemented in __call__()
merged_attr = {name: getattr(from_coo, name)
for name in from_coo.frame_attributes}
merged_attr.update({name: getattr(to_frame, name)
for name in to_frame.frame_attributes})
affine_params = (None, None)
# Step through each transform step (frame A -> frame B)
for i, t in enumerate(transforms):
# Extract the relevant attributes for frame A
if i == 0:
# If frame A is actually the initial frame, preserve its attributes
a_attr = {name: getattr(from_coo, name)
for name in from_coo.frame_attributes}
else:
a_attr = {k: v for k, v in merged_attr.items()
if k in t.fromsys.frame_attributes}
# Extract the relevant attributes for frame B
b_attr = {k: v for k, v in merged_attr.items()
if k in t.tosys.frame_attributes}
# Obtain the affine parameters for the transform
# Note that we insert some dummy data into frame A because the transformation
# machinery requires there to be data present. Removing that limitation
# is a possible TODO, but some care would need to be taken because some affine
# transforms have branching code depending on the presence of differentials.
next_affine_params = t._affine_params(t.fromsys(from_coo.data, **a_attr),
t.tosys(**b_attr))
# Combine the affine parameters with the running set
affine_params = _combine_affine_params(affine_params, next_affine_params)
# If there is no origin shift, return only the matrix
return affine_params[0] if fixed_origin else affine_params
# The return type depends on whether there is any origin shift
transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform
else:
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return to_frame.realize_frame(from_coo.data)
return self(from_coo, to_frame)
transform_type = FunctionTransformWithFiniteDifference
return transform_type(single_transform, self.fromsys, self.tosys, priority=self.priority)
def _combine_affine_params(params, next_params):
"""
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``.
"""
M, vec = params
next_M, next_vec = next_params
# Multiply the transformation matrices if they both exist
if M is not None and next_M is not None:
new_M = next_M @ M
else:
new_M = M if M is not None else next_M
if vec is not None:
# Transform the first displacement vector by the second transformation matrix
if next_M is not None:
vec = vec.transform(next_M)
# Calculate the new displacement vector
if next_vec is not None:
if 's' in vec.differentials and 's' in next_vec.differentials:
# Adding vectors with velocities takes more steps
# TODO: Add support in representation.py
new_vec_velocity = vec.differentials['s'] + next_vec.differentials['s']
new_vec = vec.without_differentials() + next_vec.without_differentials()
new_vec = new_vec.with_differentials({'s': new_vec_velocity})
else:
new_vec = vec + next_vec
else:
new_vec = vec
else:
new_vec = next_vec
return new_M, new_vec
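# A minimal sketch of the algebra implemented above (hypothetical values; in
# real use the displacement is a CartesianRepresentation, so only the matrix
# part is exercised here). Composing x -> M1 @ x + v1 with x -> M2 @ x + v2
# gives x -> (M2 @ M1) @ x + (M2 @ v1 + v2), which is why the first
# displacement is transformed by the second matrix before the vectors add:
#
#     >>> import numpy as np
#     >>> M1, M2 = np.diag([1., 2., 3.]), np.eye(3)
#     >>> new_M, new_vec = _combine_affine_params((M1, None), (M2, None))
#     >>> np.allclose(new_M, M2 @ M1) and new_vec is None
#     True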
# map class names to colorblind-safe colors
trans_to_color = {}
trans_to_color[AffineTransform] = '#555555' # gray
trans_to_color[FunctionTransform] = '#783001' # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = '#d95f02' # red-ish
trans_to_color[StaticMatrixTransform] = '#7570b3' # blue-ish
trans_to_color[DynamicMatrixTransform] = '#1b9e77' # green-ish
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the classes and utility functions for distance and
cartesian coordinates.
"""
import warnings
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
from .angles import Angle
__all__ = ['Distance']
__doctest_requires__ = {'*': ['scipy']}
class Distance(u.SpecificTypeQuantity):
"""
A one-dimensional distance.
This can be initialized by providing one of the following:
* Distance ``value`` (array or float) and a ``unit``
* |Quantity| object with dimensionality of length
* Redshift and (optionally) a `~astropy.cosmology.Cosmology`
* Distance modulus
* Parallax
Parameters
----------
value : scalar or `~astropy.units.Quantity` ['length']
The value of this distance.
unit : `~astropy.units.UnitBase` ['length']
The unit for this distance.
z : float
A redshift for this distance. It will be converted to a distance
by computing the luminosity distance for this redshift given the
cosmology specified by ``cosmology``. Must be given as a keyword
argument.
cosmology : `~astropy.cosmology.Cosmology` or None
A cosmology that will be used to compute the distance from ``z``.
If `None`, the current cosmology will be used (see
`astropy.cosmology` for details).
distmod : float or `~astropy.units.Quantity`
The distance modulus for this distance. Note that if ``unit`` is not
provided, a guess will be made at the unit between AU, pc, kpc, and Mpc.
parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The parallax in angular units.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
order : {'C', 'F', 'A'}, optional
See `~astropy.units.Quantity`.
subok : bool, optional
See `~astropy.units.Quantity`.
ndmin : int, optional
See `~astropy.units.Quantity`.
allow_negative : bool, optional
Whether to allow negative distances (which are possible in some
cosmologies). Default: `False`.
Raises
------
`~astropy.units.UnitsError`
If the ``unit`` is not a length unit.
ValueError
If value specified is less than 0 and ``allow_negative=False``.
If ``cosmology`` is provided when ``z`` is *not* given.
If either none or more than one of ``value``, ``z``, ``distmod``,
or ``parallax`` were given.
Examples
--------
>>> from astropy import units as u
>>> from astropy.cosmology import WMAP5
>>> Distance(10, u.Mpc)
<Distance 10. Mpc>
>>> Distance(40*u.pc, unit=u.kpc)
<Distance 0.04 kpc>
>>> Distance(z=0.23) # doctest: +FLOAT_CMP
<Distance 1184.01657566 Mpc>
>>> Distance(z=0.23, cosmology=WMAP5) # doctest: +FLOAT_CMP
<Distance 1147.78831918 Mpc>
>>> Distance(distmod=24.47*u.mag) # doctest: +FLOAT_CMP
<Distance 783.42964277 kpc>
>>> Distance(parallax=21.34*u.mas) # doctest: +FLOAT_CMP
<Distance 46.86035614 pc>
"""
_equivalent_unit = u.m
_include_easy_conversion_members = True
def __new__(cls, value=None, unit=None, z=None, cosmology=None,
distmod=None, parallax=None, dtype=np.inexact, copy=True,
order=None, subok=False, ndmin=0, allow_negative=False):
n_not_none = sum(x is not None for x in [value, z, distmod, parallax])
if n_not_none == 0:
raise ValueError('none of `value`, `z`, `distmod`, or `parallax` '
'were given to Distance constructor')
elif n_not_none > 1:
raise ValueError('more than one of `value`, `z`, `distmod`, or '
'`parallax` were given to Distance constructor')
if value is None:
# If something else but `value` was provided then a new array will
# be created anyways and there is no need to copy that.
copy = False
if z is not None:
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
value = cosmology.luminosity_distance(z)
elif cosmology is not None:
raise ValueError('a `cosmology` was given but `z` was not '
'provided in Distance constructor')
elif distmod is not None:
value = cls._distmod_to_pc(distmod)
if unit is None:
# if the unit is not specified, guess based on the mean of
# the log of the distance
meanlogval = np.log10(value.value).mean()
if meanlogval > 6:
unit = u.Mpc
elif meanlogval > 3:
unit = u.kpc
elif meanlogval < -3: # ~200 AU
unit = u.AU
else:
unit = u.pc
elif parallax is not None:
if unit is None:
unit = u.pc
value = parallax.to_value(unit, equivalencies=u.parallax())
if np.any(parallax < 0):
if allow_negative:
warnings.warn(
"negative parallaxes are converted to NaN "
"distances even when `allow_negative=True`, "
"because negative parallaxes cannot be transformed "
"into distances. See the discussion in this paper: "
"https://arxiv.org/abs/1507.02105", AstropyWarning)
else:
raise ValueError(
"some parallaxes are negative, which are not "
"interpretable as distances. See the discussion in "
"this paper: https://arxiv.org/abs/1507.02105 . You "
"can convert negative parallaxes to NaN distances by "
"providing the `allow_negative=True` argument.")
# now we have arguments like for a Quantity, so let it do the work
distance = super().__new__(
cls, value, unit, dtype=dtype, copy=copy, order=order,
subok=subok, ndmin=ndmin)
# This invalid catch block can be removed when the minimum numpy
# version is >= 1.19 (NUMPY_LT_1_19)
with np.errstate(invalid='ignore'):
any_negative = np.any(distance.value < 0)
if not allow_negative and any_negative:
raise ValueError("distance must be >= 0. Use the argument "
"`allow_negative=True` to allow negative values.")
return distance
@property
def z(self):
"""Short for ``self.compute_z()``"""
return self.compute_z()
def compute_z(self, cosmology=None, **atzkw):
"""
The redshift for this distance assuming its physical distance is
a luminosity distance.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` or None
The cosmology to assume for this calculation, or `None` to use the
current cosmology (see `astropy.cosmology` for details).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
z : `~astropy.units.Quantity`
The redshift of this distance given the provided ``cosmology``.
Warnings
--------
This method can be slow for large arrays.
The redshift is determined using :func:`astropy.cosmology.z_at_value`,
which handles vector inputs (e.g. an array of distances) by
element-wise calling of :func:`scipy.optimize.minimize_scalar`.
For faster results consider using an interpolation table;
:func:`astropy.cosmology.z_at_value` provides details.
See Also
--------
:func:`astropy.cosmology.z_at_value` : Find the redshift corresponding to a
:meth:`astropy.cosmology.FLRW.luminosity_distance`.
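Examples
--------
A round-trip sanity check (marked ``+SKIP`` since the exact return type
and formatting depend on the version of the cosmology machinery)::

    >>> Distance(z=0.23).compute_z()  # doctest: +SKIP
    <Quantity 0.23>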
"""
from astropy.cosmology import z_at_value
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
atzkw.setdefault("ztol", 1.e-10)
return z_at_value(cosmology.luminosity_distance, self, **atzkw)
@property
def distmod(self):
"""The distance modulus as a `~astropy.units.Quantity`"""
val = 5. * np.log10(self.to_value(u.pc)) - 5.
return u.Quantity(val, u.mag, copy=False)
@classmethod
def _distmod_to_pc(cls, dm):
dm = u.Quantity(dm, u.mag)
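# invert mu = 5 log10(d_pc) - 5  ->  d_pc = 10 ** ((mu + 5) / 5)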
return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False)
@property
def parallax(self):
"""The parallax angle as an `~astropy.coordinates.Angle` object"""
return Angle(self.to(u.milliarcsecond, u.parallax()))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for coordinate-related functionality.
This is generally just wrapping around the object-oriented coordinates
framework, but it is useful for some users who are used to more functional
interfaces.
"""
import warnings
from collections.abc import Sequence
import numpy as np
import erfa
from astropy import units as u
from astropy.constants import c
from astropy.io import ascii
from astropy.utils import isiterable, data
from .sky_coordinate import SkyCoord
from .builtin_frames import GCRS, PrecessedGeocentric
from .representation import SphericalRepresentation, CartesianRepresentation
from .builtin_frames.utils import get_jd12
__all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun',
'get_constellation', 'concatenate_representations', 'concatenate']
def cartesian_to_spherical(x, y, z):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
Note that the resulting angles are in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This function simply wraps functionality provided by the
`~astropy.coordinates.CartesianRepresentation` and
`~astropy.coordinates.SphericalRepresentation` classes. In general,
for both performance and readability, we suggest using these classes
directly. But for situations where a quick one-off conversion makes
sense, this function is provided.
Parameters
----------
x : scalar, array-like, or `~astropy.units.Quantity`
The first Cartesian coordinate.
y : scalar, array-like, or `~astropy.units.Quantity`
The second Cartesian coordinate.
z : scalar, array-like, or `~astropy.units.Quantity`
The third Cartesian coordinate.
Returns
-------
r : `~astropy.units.Quantity`
The radial coordinate (in the same units as the inputs).
lat : `~astropy.units.Quantity` ['angle']
The latitude in radians
lon : `~astropy.units.Quantity` ['angle']
The longitude in radians
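Examples
--------
A quick check for a point in the equatorial plane, 45 degrees around::

    >>> r, lat, lon = cartesian_to_spherical(1.0, 1.0, 0.0)
    >>> float(r)  # doctest: +FLOAT_CMP
    1.4142135623730951
    >>> float(lat.deg), float(lon.deg)  # doctest: +FLOAT_CMP
    (0.0, 45.0)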
"""
if not hasattr(x, 'unit'):
x = x * u.dimensionless_unscaled
if not hasattr(y, 'unit'):
y = y * u.dimensionless_unscaled
if not hasattr(z, 'unit'):
z = z * u.dimensionless_unscaled
cart = CartesianRepresentation(x, y, z)
sph = cart.represent_as(SphericalRepresentation)
return sph.distance, sph.lat, sph.lon
def spherical_to_cartesian(r, lat, lon):
"""
Converts spherical polar coordinates to rectangular cartesian
coordinates.
Note that the input angles should be in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This is a low-level function used internally in
`astropy.coordinates`. It is provided for users if they really
want to use it, but it is recommended that you use the
`astropy.coordinates` coordinate systems.
Parameters
----------
r : scalar, array-like, or `~astropy.units.Quantity`
The radial coordinate (its unit, if any, determines the unit of the output).
lat : scalar, array-like, or `~astropy.units.Quantity` ['angle']
The latitude (in radians if array or scalar)
lon : scalar, array-like, or `~astropy.units.Quantity` ['angle']
The longitude (in radians if array or scalar)
Returns
-------
x : float or array
The first cartesian coordinate.
y : float or array
The second cartesian coordinate.
z : float or array
The third cartesian coordinate.
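Examples
--------
A quick check that the equatorial origin maps to the +x axis::

    >>> x, y, z = spherical_to_cartesian(2.0, 0.0, 0.0)
    >>> float(x), float(y), float(z)  # doctest: +FLOAT_CMP
    (2.0, 0.0, 0.0)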
"""
if not hasattr(r, 'unit'):
r = r * u.dimensionless_unscaled
if not hasattr(lat, 'unit'):
lat = lat * u.radian
if not hasattr(lon, 'unit'):
lon = lon * u.radian
sph = SphericalRepresentation(distance=r, lat=lat, lon=lon)
cart = sph.represent_as(CartesianRepresentation)
return cart.x, cart.y, cart.z
def get_sun(time):
"""
Determines the location of the sun at a given time (or times, if the input
is an array `~astropy.time.Time` object), in geocentric coordinates.
Parameters
----------
time : `~astropy.time.Time`
The time(s) at which to compute the location of the sun.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord`
The location of the sun as a `~astropy.coordinates.SkyCoord` in the
`~astropy.coordinates.GCRS` frame.
Notes
-----
The algorithm for determining the sun/earth relative position is based
on the simplified version of VSOP2000 that is part of ERFA. Compared to
JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
250 km over the 1000-3000 span.
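Examples
--------
A minimal usage sketch (``+SKIP`` because the printed values depend on
the ERFA version and print precision in use)::

    >>> from astropy.time import Time
    >>> sun = get_sun(Time('2000-01-01'))  # doctest: +SKIP
    >>> sun.ra.deg, sun.dec.deg  # doctest: +SKIP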
"""
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb'))
# We have to manually do aberration because we're outputting directly into
# GCRS
earth_p = earth_pv_helio['p']
earth_v = earth_pv_bary['v']
# convert barycentric velocity to units of c, but keep as array for passing in to erfa
earth_v /= c.to_value(u.au/u.d)
dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5
properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)),
-earth_v, dsun, invlorentz)
cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU,
y=-dsun*properdir[..., 1] * u.AU,
z=-dsun*properdir[..., 2] * u.AU)
return SkyCoord(cartrep, frame=GCRS(obstime=time))
# global dictionary that caches repeatedly-needed info for get_constellation
_constellation_data = {}
def get_constellation(coord, short_name=False, constellation_list='iau'):
"""
Determines the constellation(s) that a given coordinate object falls in.
Parameters
----------
coord : coordinate-like
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If ``coord`` is a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
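Examples
--------
For instance, a coordinate near the north celestial pole falls in Ursa
Minor (``+SKIP`` since the lookup exercises the full transform machinery)::

    >>> from astropy import units as u
    >>> from astropy.coordinates import SkyCoord
    >>> get_constellation(SkyCoord(37.95*u.deg, 89.26*u.deg))  # doctest: +SKIP
    'Ursa Minor'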
"""
if constellation_list != 'iau':
raise ValueError("only 'iau' us currently supported for constellation_list")
# read the data files and cache them if they haven't been already
if not _constellation_data:
cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat')
ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name'])
cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8')
cnames_short_to_long = {l[:3]: l[4:] for l in cnames.split('\n')
if not l.startswith('#')}
cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']])
_constellation_data['ctable'] = ctable
_constellation_data['cnames_long'] = cnames_long
else:
ctable = _constellation_data['ctable']
cnames_long = _constellation_data['cnames_long']
isscalar = coord.isscalar
# if it is geocentric, we reproduce the frame but with the 1875 equinox,
# which is where the constellations are defined
# this yields a "dubious year" warning because ERFA considers the year 1875
# "dubious", probably because UTC isn't well-defined then and precession
# models aren't precisely calibrated back to then. But it's plenty
# sufficient for constellations
with warnings.catch_warnings():
warnings.simplefilter('ignore', erfa.ErfaWarning)
constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875'))
if isscalar:
rah = constel_coord.ra.ravel().hour
decd = constel_coord.dec.ravel().deg
else:
rah = constel_coord.ra.hour
decd = constel_coord.dec.deg
constellidx = -np.ones(len(rah), dtype=int)
notided = constellidx == -1 # should be all
for i, row in enumerate(ctable):
msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl'])
constellidx[notided & msk] = i
notided = constellidx == -1
if np.sum(notided) == 0:
break
else:
raise ValueError(f'Could not find constellation for coordinates {constel_coord[notided]}')
if short_name:
names = ctable['name'][constellidx]
else:
names = cnames_long[constellidx]
if isscalar:
return names[0]
else:
return names
def _concatenate_components(reps_difs, names):
""" Helper function for the concatenate function below. Gets and
concatenates all of the individual components for an iterable of
representations or differentials.
"""
values = []
for name in names:
unit0 = getattr(reps_difs[0], name).unit
# Go via to_value because np.concatenate doesn't work with Quantity
data_vals = [getattr(x, name).to_value(unit0) for x in reps_difs]
concat_vals = np.concatenate(np.atleast_1d(*data_vals))
concat_vals = concat_vals << unit0
values.append(concat_vals)
return values
def concatenate_representations(reps):
"""
Combine multiple representation objects into a single instance by
concatenating the data in each component.
Currently, all of the input representations have to be the same type. This
properly handles differential or velocity data, but all input objects must
have the same differential object type as well.
Parameters
----------
reps : sequence of `~astropy.coordinates.BaseRepresentation`
The objects to concatenate
Returns
-------
rep : `~astropy.coordinates.BaseRepresentation` subclass instance
A single representation object with its data set to the concatenation of
all the elements of the input sequence of representations.
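Examples
--------
Concatenating two scalar representations gives a length-2 result::

    >>> from astropy import units as u
    >>> from astropy.coordinates import CartesianRepresentation
    >>> rep = CartesianRepresentation([1, 2, 3] * u.kpc)
    >>> concatenate_representations([rep, rep]).shape
    (2,)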
"""
if not isinstance(reps, (Sequence, np.ndarray)):
raise TypeError('Input must be a list or iterable of representation '
'objects.')
# First, validate that the representations are the same, and
# concatenate all of the positional data:
rep_type = type(reps[0])
if any(type(r) != rep_type for r in reps):
raise TypeError('Input representations must all have the same type.')
# Construct the new representation with the concatenated data from the
# representations passed in
values = _concatenate_components(reps,
rep_type.attr_classes.keys())
new_rep = rep_type(*values)
has_diff = any('s' in rep.differentials for rep in reps)
if has_diff and any('s' not in rep.differentials for rep in reps):
raise ValueError('Input representations must either all contain '
'differentials, or not contain differentials.')
if has_diff:
dif_type = type(reps[0].differentials['s'])
if any('s' not in r.differentials or
type(r.differentials['s']) != dif_type
for r in reps):
raise TypeError('All input representations must have the same '
'differential type.')
values = _concatenate_components([r.differentials['s'] for r in reps],
dif_type.attr_classes.keys())
new_dif = dif_type(*values)
new_rep = new_rep.with_differentials({'s': new_dif})
return new_rep
def concatenate(coords):
"""
Combine multiple coordinate objects into a single
`~astropy.coordinates.SkyCoord`.
"Coordinate objects" here mean frame objects with data,
`~astropy.coordinates.SkyCoord`, or representation objects. Currently,
they must all be in the same frame, but in a future version this may be
relaxed to allow inhomogeneous sequences of objects.
Parameters
----------
coords : sequence of coordinate-like
The objects to concatenate
Returns
-------
cskycoord : SkyCoord
A single sky coordinate with its data set to the concatenation of all
the elements in ``coords``
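Examples
--------
Two scalar coordinates become a single length-2 coordinate::

    >>> from astropy import units as u
    >>> from astropy.coordinates import SkyCoord
    >>> sc = concatenate([SkyCoord(1*u.deg, 2*u.deg), SkyCoord(3*u.deg, 4*u.deg)])
    >>> len(sc)
    2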
"""
if getattr(coords, 'isscalar', False) or not isiterable(coords):
raise TypeError('The argument to concatenate must be iterable')
scs = [SkyCoord(coord, copy=False) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError("All inputs must have equivalent frames: "
"{} != {}".format(sc, scs[0]))
# TODO: this can be changed to SkyCoord.from_representation() for a speed
# boost when we switch to using classmethods
return SkyCoord(concatenate_representations([c.data for c in coords]),
frame=scs[0].frame)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from collections.abc import Sequence
import inspect
import numpy as np
from astropy.units import Unit, IrreducibleUnit
from astropy import units as u
from .baseframe import (BaseCoordinateFrame, frame_transform_graph,
_get_repr_cls, _get_diff_cls)
from .builtin_frames import ICRS
from .representation import (BaseRepresentation, SphericalRepresentation,
UnitSphericalRepresentation)
"""
This module contains utility functions to make the SkyCoord initializer more modular
and maintainable. No functionality here should be in the public API, but rather used as
part of creating SkyCoord objects.
"""
PLUS_MINUS_RE = re.compile(r'(\+|\-)')
J_PREFIXED_RA_DEC_RE = re.compile(
r"""J # J prefix
([0-9]{6,7}\.?[0-9]{0,2}) # RA as HHMMSS.ss or DDDMMSS.ss, optional decimal digits
([\+\-][0-9]{6}\.?[0-9]{0,2})\s*$ # Dec as DDMMSS.ss, optional decimal digits
""", re.VERBOSE)
def _get_frame_class(frame):
"""
Get a frame class from the input `frame`, which could be a frame name
string, or frame class.
"""
if isinstance(frame, str):
frame_names = frame_transform_graph.get_names()
if frame not in frame_names:
raise ValueError('Coordinate frame name "{}" is not a known '
'coordinate frame ({})'
.format(frame, sorted(frame_names)))
frame_cls = frame_transform_graph.lookup_name(frame)
elif inspect.isclass(frame) and issubclass(frame, BaseCoordinateFrame):
frame_cls = frame
else:
raise ValueError("Coordinate frame must be a frame name or frame "
"class, not a '{}'".format(frame.__class__.__name__))
return frame_cls
_conflict_err_msg = ("Coordinate attribute '{0}'={1!r} conflicts with keyword "
"argument '{0}'={2!r}. This usually means an attribute "
"was set on one of the input objects and also in the "
"keyword arguments to {3}")
def _get_frame_without_data(args, kwargs):
"""
Determines the coordinate frame from input SkyCoord args and kwargs.
This function extracts (removes) all frame attributes from the kwargs and
determines the frame class either using the kwargs, or using the first
element in the args (if a single frame object is passed in, for example).
This function allows a frame to be specified as a string like 'icrs' or a
frame class like ICRS, or an instance ICRS(), as long as the instance frame
attributes don't conflict with kwargs passed in (which could require a
three-way merge with the coordinate data possibly specified via the args).
"""
from .sky_coordinate import SkyCoord
# We eventually (hopefully) fill and return these by extracting the frame
# and frame attributes from the input:
frame_cls = None
frame_cls_kwargs = {}
# The first place to check: the frame could be specified explicitly
frame = kwargs.pop('frame', None)
if frame is not None:
# Here the frame was explicitly passed in as a keyword argument.
# If the frame is an instance or SkyCoord, we extract the attributes
# and split the instance into the frame class and an attributes dict
if isinstance(frame, SkyCoord):
# If the frame was passed as a SkyCoord, we also want to preserve
# any extra attributes (e.g., obstime) if they are not already
# specified in the kwargs. We preserve these extra attributes by
# adding them to the kwargs dict:
for attr in frame._extra_frameattr_names:
if (attr in kwargs and
np.any(getattr(frame, attr) != kwargs[attr])):
# This SkyCoord attribute passed in with the frame= object
# conflicts with an attribute passed in directly to the
# SkyCoord initializer as a kwarg:
raise ValueError(_conflict_err_msg
.format(attr, getattr(frame, attr),
kwargs[attr], 'SkyCoord'))
else:
kwargs[attr] = getattr(frame, attr)
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
# Extract any frame attributes
for attr in frame.get_frame_attr_names():
# If the frame was specified as an instance, we have to make
# sure that no frame attributes were specified as kwargs - this
# would require a potential three-way merge:
if attr in kwargs:
raise ValueError("Cannot specify frame attribute '{}' "
"directly as an argument to SkyCoord "
"because a frame instance was passed in. "
"Either pass a frame class, or modify the "
"frame attributes of the input frame "
"instance.".format(attr))
elif not frame.is_frame_attr_default(attr):
kwargs[attr] = getattr(frame, attr)
frame_cls = frame.__class__
# Make sure we propagate representation/differential _type choices,
# unless these are specified directly in the kwargs:
kwargs.setdefault('representation_type', frame.representation_type)
kwargs.setdefault('differential_type', frame.differential_type)
if frame_cls is None: # frame probably a string
frame_cls = _get_frame_class(frame)
# Check that the new frame doesn't conflict with existing coordinate frame
# if a coordinate is supplied in the args list. If the frame still had not
# been set by this point and a coordinate was supplied, then use that frame.
for arg in args:
# this catches the "single list passed in" case. For that case we want
# to allow the first argument to set the class. That's OK because
# _parse_coordinate_arg goes and checks that the frames match between
# the first and all the others
if (isinstance(arg, (Sequence, np.ndarray)) and
len(args) == 1 and len(arg) > 0):
arg = arg[0]
coord_frame_obj = coord_frame_cls = None
if isinstance(arg, BaseCoordinateFrame):
coord_frame_obj = arg
elif isinstance(arg, SkyCoord):
coord_frame_obj = arg.frame
if coord_frame_obj is not None:
coord_frame_cls = coord_frame_obj.__class__
frame_diff = coord_frame_obj.get_representation_cls('s')
if frame_diff is not None:
# we do this check because otherwise if there's no default
# differential (i.e. it is None), the code below chokes. but
# None still gets through if the user *requests* it
kwargs.setdefault('differential_type', frame_diff)
for attr in coord_frame_obj.get_frame_attr_names():
if (attr in kwargs and
not coord_frame_obj.is_frame_attr_default(attr) and
np.any(kwargs[attr] != getattr(coord_frame_obj, attr))):
raise ValueError("Frame attribute '{}' has conflicting "
"values between the input coordinate data "
"and either keyword arguments or the "
"frame specification (frame=...): "
"{} =/= {}"
.format(attr,
getattr(coord_frame_obj, attr),
kwargs[attr]))
elif (attr not in kwargs and
not coord_frame_obj.is_frame_attr_default(attr)):
kwargs[attr] = getattr(coord_frame_obj, attr)
if coord_frame_cls is not None:
if frame_cls is None:
frame_cls = coord_frame_cls
elif frame_cls is not coord_frame_cls:
raise ValueError("Cannot override frame='{}' of input "
"coordinate with new frame='{}'. Instead, "
"transform the coordinate."
.format(coord_frame_cls.__name__,
frame_cls.__name__))
if frame_cls is None:
frame_cls = ICRS
# By now, frame_cls should be set - if it's not, something went wrong
if not issubclass(frame_cls, BaseCoordinateFrame):
# We should hopefully never get here...
raise ValueError(f'Frame class has unexpected type: {frame_cls.__name__}')
for attr in frame_cls.frame_attributes:
if attr in kwargs:
frame_cls_kwargs[attr] = kwargs.pop(attr)
if 'representation_type' in kwargs:
frame_cls_kwargs['representation_type'] = _get_repr_cls(
kwargs.pop('representation_type'))
differential_type = kwargs.pop('differential_type', None)
if differential_type is not None:
frame_cls_kwargs['differential_type'] = _get_diff_cls(
differential_type)
return frame_cls, frame_cls_kwargs
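# A minimal sketch of the contract (hypothetical call): the frame class is
# resolved from the 'frame' keyword, and any frame attributes are popped out
# of the kwargs dict and returned separately, still unvalidated at this stage:
#
#     >>> from astropy.coordinates.builtin_frames import FK5
#     >>> kwargs = {'frame': 'fk5', 'equinox': 'J2017.5'}
#     >>> cls, cls_kwargs = _get_frame_without_data([], kwargs)
#     >>> cls is FK5, cls_kwargs
#     (True, {'equinox': 'J2017.5'})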
def _parse_coordinate_data(frame, args, kwargs):
"""
Extract coordinate data from the args and kwargs passed to SkyCoord.
By this point, we assume that all of the frame attributes have been
extracted from kwargs (see _get_frame_without_data()), so all that are left
are (1) extra SkyCoord attributes, and (2) the coordinate data, specified in
any of the valid ways.
"""
valid_skycoord_kwargs = {}
valid_components = {}
info = None
# Look through the remaining kwargs to see if any are valid attribute names
# by asking the frame transform graph:
attr_names = list(kwargs.keys())
for attr in attr_names:
if attr in frame_transform_graph.frame_attributes:
valid_skycoord_kwargs[attr] = kwargs.pop(attr)
# By this point in parsing the arguments, anything left in the args and
# kwargs should be data. Either as individual components, or a list of
# objects, or a representation, etc.
# Get units of components
units = _get_representation_component_units(args, kwargs)
# Grab any frame-specific attr names like `ra` or `l` or `distance` from
# kwargs and move them to valid_components.
valid_components.update(_get_representation_attrs(frame, units, kwargs))
# Error if anything is still left in kwargs
if kwargs:
# The next few lines add a more user-friendly error message to a
# common and confusing situation when the user specifies, e.g.,
# `pm_ra` when they really should be passing `pm_ra_cosdec`. The
# extra error should only turn on when the positional representation
# is spherical, and when the component 'pm_<lon>' is passed.
pm_message = ''
if frame.representation_type == SphericalRepresentation:
frame_names = list(frame.get_representation_component_names().keys())
lon_name = frame_names[0]
lat_name = frame_names[1]
if f'pm_{lon_name}' in list(kwargs.keys()):
pm_message = ('\n\n By default, most frame classes expect '
'the longitudinal proper motion to include '
'the cos(latitude) term, named '
'`pm_{}_cos{}`. Did you mean to pass in '
'this component?'
.format(lon_name, lat_name))
raise ValueError('Unrecognized keyword argument(s) {}{}'
.format(', '.join(f"'{key}'"
for key in kwargs),
pm_message))
# Finally deal with the unnamed args. This figures out what the arg[0]
# is and returns a dict with appropriate key/values for initializing
# frame class. Note that differentials are *never* valid args, only
# kwargs. So they are not accounted for here (unless they're in a frame
# or SkyCoord object)
if args:
if len(args) == 1:
# One arg which must be a coordinate. In this case coord_kwargs
# will contain keys like 'ra', 'dec', 'distance' along with any
# frame attributes like equinox or obstime which were explicitly
# specified in the coordinate object (i.e. non-default).
_skycoord_kwargs, _components = _parse_coordinate_arg(
args[0], frame, units, kwargs)
# Copy other 'info' attr only if it has actually been defined.
if 'info' in getattr(args[0], '__dict__', ()):
info = args[0].info
elif len(args) <= 3:
_skycoord_kwargs = {}
_components = {}
frame_attr_names = frame.representation_component_names.keys()
repr_attr_names = frame.representation_component_names.values()
for arg, frame_attr_name, repr_attr_name, unit in zip(args, frame_attr_names,
repr_attr_names, units):
attr_class = frame.representation_type.attr_classes[repr_attr_name]
_components[frame_attr_name] = attr_class(arg, unit=unit)
else:
raise ValueError('Must supply no more than three positional arguments, got {}'
.format(len(args)))
# The next two loops copy the component and skycoord attribute data into
# their final, respective "valid_" dictionaries. For each, we check that
# there are no relevant conflicts with values specified by the user
# through other means:
# First validate the component data
for attr, coord_value in _components.items():
if attr in valid_components:
raise ValueError(_conflict_err_msg
.format(attr, coord_value,
valid_components[attr], 'SkyCoord'))
valid_components[attr] = coord_value
# Now validate the custom SkyCoord attributes
for attr, value in _skycoord_kwargs.items():
if (attr in valid_skycoord_kwargs and
np.any(valid_skycoord_kwargs[attr] != value)):
raise ValueError(_conflict_err_msg
.format(attr, value,
valid_skycoord_kwargs[attr],
'SkyCoord'))
valid_skycoord_kwargs[attr] = value
return valid_skycoord_kwargs, valid_components, info
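# A minimal sketch (hypothetical call): component data is pulled out of the
# kwargs and validated through the frame's representation classes:
#
#     >>> from astropy.coordinates.builtin_frames import ICRS
#     >>> import astropy.units as u
#     >>> _, comps, _ = _parse_coordinate_data(ICRS(), (), {'ra': 1*u.deg, 'dec': 2*u.deg})
#     >>> sorted(comps)
#     ['dec', 'ra']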
def _get_representation_component_units(args, kwargs):
"""
Get the unit from kwargs for the *representation* components (not the
differentials).
"""
if 'unit' not in kwargs:
units = [None, None, None]
else:
units = kwargs.pop('unit')
if isinstance(units, str):
units = [x.strip() for x in units.split(',')]
# Allow for input like unit='deg' or unit='m'
if len(units) == 1:
units = [units[0], units[0], units[0]]
elif isinstance(units, (Unit, IrreducibleUnit)):
units = [units, units, units]
try:
units = [(Unit(x) if x else None) for x in units]
units.extend(None for x in range(3 - len(units)))
if len(units) > 3:
raise ValueError()
except Exception as err:
raise ValueError('Unit keyword must have one to three unit values as '
'tuple or comma-separated string.') from err
return units
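# For example (hypothetical inputs; the result is always padded to three
# entries so it can be zipped against the representation components):
#
#     >>> _get_representation_component_units([], {'unit': 'deg, deg'})
#     [Unit("deg"), Unit("deg"), None]
#     >>> _get_representation_component_units([], {})
#     [None, None, None]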
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
"""
Single unnamed arg supplied. This must be:
- Coordinate frame with data
- Representation
- SkyCoord
- List or tuple of:
- String which splits into two values
- Iterable with two values
- SkyCoord, frame, or representation objects.
Returns a dict mapping coordinate attribute names to values (or lists of
values)
"""
from .sky_coordinate import SkyCoord
is_scalar = False # Differentiate between scalar and list input
# valid_kwargs = {} # Returned dict of lon, lat, and distance (optional)
components = {}
skycoord_kwargs = {}
frame_attr_names = list(frame.representation_component_names.keys())
repr_attr_names = list(frame.representation_component_names.values())
repr_attr_classes = list(frame.representation_type.attr_classes.values())
n_attr_names = len(repr_attr_names)
# Turn a single string into a list of strings for convenience
if isinstance(coords, str):
is_scalar = True
coords = [coords]
if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
# Note that during parsing of `frame` it is checked that any coordinate
# args have the same frame as explicitly supplied, so don't worry here.
if not coords.has_data:
raise ValueError('Cannot initialize from a frame without coordinate data')
data = coords.data.represent_as(frame.representation_type)
values = [] # List of values corresponding to representation attrs
repr_attr_name_to_drop = []
for repr_attr_name in repr_attr_names:
# If coords did not have an explicit distance then don't include in initializers.
if (isinstance(coords.data, UnitSphericalRepresentation) and
repr_attr_name == 'distance'):
repr_attr_name_to_drop.append(repr_attr_name)
continue
# Get the value from `data` in the eventual representation
values.append(getattr(data, repr_attr_name))
# drop the ones that were skipped because they were distances
for nametodrop in repr_attr_name_to_drop:
nameidx = repr_attr_names.index(nametodrop)
del repr_attr_names[nameidx]
del units[nameidx]
del frame_attr_names[nameidx]
del repr_attr_classes[nameidx]
if coords.data.differentials and 's' in coords.data.differentials:
orig_vel = coords.data.differentials['s']
vel = coords.data.represent_as(frame.representation_type, frame.get_representation_cls('s')).differentials['s']
for frname, reprname in frame.get_representation_component_names('s').items():
if (reprname == 'd_distance' and
not hasattr(orig_vel, reprname) and
'unit' in orig_vel.get_name()):
continue
values.append(getattr(vel, reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(vel.attr_classes[reprname])
for attr in frame_transform_graph.frame_attributes:
value = getattr(coords, attr, None)
use_value = (isinstance(coords, SkyCoord) or
attr not in coords.get_frame_attr_names())
if use_value and value is not None:
skycoord_kwargs[attr] = value
elif isinstance(coords, BaseRepresentation):
if coords.differentials and 's' in coords.differentials:
diffs = frame.get_representation_cls('s')
data = coords.represent_as(frame.representation_type, diffs)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
for frname, reprname in frame.get_representation_component_names('s').items():
values.append(getattr(data.differentials['s'], reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(data.differentials['s'].attr_classes[reprname])
else:
data = coords.represent_as(frame.representation_type)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if' and
coords.ndim == 2 and coords.shape[1] <= 3):
# 2-d array of coordinate values. Handle specially for efficiency.
values = coords.transpose() # Iterates over repr attrs
elif isinstance(coords, (Sequence, np.ndarray)):
# Handles list-like input.
vals = []
is_ra_dec_representation = ('ra' in frame.representation_component_names and
'dec' in frame.representation_component_names)
coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
if any(isinstance(coord, coord_types) for coord in coords):
# this parsing path is used when there are coordinate-like objects
# in the list - instead of creating lists of values, we create
# SkyCoords from the list elements and then combine them.
scs = [SkyCoord(coord, **init_kwargs) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError("List of inputs don't have equivalent "
"frames: {} != {}".format(sc, scs[0]))
# Now use the first to determine if they are all UnitSpherical
allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)
# get the frame attributes from the first coord in the list, because
# from the above we know it matches all the others. First copy over
# the attributes that are in the frame itself, then copy over any
# extras in the SkyCoord
for fattrnm in scs[0].frame.frame_attributes:
skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
for fattrnm in scs[0]._extra_frameattr_names:
skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm)
# Now combine the values, to be used below
values = []
for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names):
if allunitsphrepr and repr_attr_name == 'distance':
# if they are *all* UnitSpherical, don't give a distance
continue
data_vals = []
for sc in scs:
data_val = getattr(sc, data_attr_name)
data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val)
concat_vals = np.concatenate(data_vals)
# Hack because np.concatenate doesn't fully work with Quantity
if isinstance(concat_vals, u.Quantity):
concat_vals._unit = data_val.unit
values.append(concat_vals)
else:
# none of the elements are "frame-like"
# turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
for coord in coords:
if isinstance(coord, str):
coord1 = coord.split()
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif is_ra_dec_representation:
coord = _parse_ra_dec(coord)
else:
coord = coord1
vals.append(coord) # Assumes coord is a sequence at this point
# Do some basic validation of the list elements: all have a length and all
# lengths the same
try:
n_coords = sorted({len(x) for x in vals})
except Exception as err:
raise ValueError('One or more elements of input sequence '
'does not have a length.') from err
if len(n_coords) > 1:
raise ValueError('Input coordinate values must have '
'same number of elements, found {}'.format(n_coords))
n_coords = n_coords[0]
# Must have no more coord inputs than representation attributes
if n_coords > n_attr_names:
raise ValueError('Input coordinates have {} values but '
'representation {} only accepts {}'
.format(n_coords,
frame.representation_type.get_name(),
n_attr_names))
# Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
# (ok since we know it is exactly rectangular). (Note: can't just use zip(*values)
# because Longitude et al. distinguish a list from a tuple, so [a1, a2, ..] is
# needed while (a1, a2, ..) doesn't work.)
values = [list(x) for x in zip(*vals)]
if is_scalar:
values = [x[0] for x in values]
else:
raise ValueError('Cannot parse coordinates from first argument')
# Finally we have a list of values from which to create the keyword args
# for the frame initialization. Validate by running through the appropriate
# class initializer and supply units (which might be None).
try:
for frame_attr_name, repr_attr_class, value, unit in zip(
frame_attr_names, repr_attr_classes, values, units):
components[frame_attr_name] = repr_attr_class(value, unit=unit,
copy=False)
except Exception as err:
raise ValueError('Cannot parse first argument data "{}" for attribute '
'{}'.format(value, frame_attr_name)) from err
return skycoord_kwargs, components
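# A minimal sketch (hypothetical call): a list of "lon lat" strings parsed
# against an ICRS frame yields the component dict used to build the frame:
#
#     >>> from astropy.coordinates.builtin_frames import ICRS
#     >>> import astropy.units as u
#     >>> _, comps = _parse_coordinate_arg(['1 2', '3 4'], ICRS(), [u.deg, u.deg, None], {})
#     >>> comps['ra']
#     <Longitude [1., 3.] deg>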
def _get_representation_attrs(frame, units, kwargs):
"""
Find instances of the "representation attributes" for specifying data
for this frame. Pop them off of kwargs, run through the appropriate class
constructor (to validate and apply unit), and put into the output
valid_kwargs. "Representation attributes" are the frame-specific aliases
for the underlying data values in the representation, e.g. "ra" for "lon"
for many equatorial spherical representations, or "w" for "x" in the
cartesian representation of Galactic.
This also gets any *differential* kwargs, because they go into the same
frame initializer later on.
"""
frame_attr_names = frame.representation_component_names.keys()
repr_attr_classes = frame.representation_type.attr_classes.values()
valid_kwargs = {}
for frame_attr_name, repr_attr_class, unit in zip(frame_attr_names, repr_attr_classes, units):
value = kwargs.pop(frame_attr_name, None)
if value is not None:
try:
valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit)
except u.UnitConversionError as err:
error_message = (
f"Unit '{unit}' ({unit.physical_type}) could not be applied to '{frame_attr_name}'. "
"This can occur when passing units for some coordinate components "
"when other components are specified as Quantity objects. "
"Either pass a list of units for all components (and unit-less coordinate data), "
"or pass Quantities for all components."
)
raise u.UnitConversionError(error_message) from err
# also check the differentials. They aren't included in the units keyword,
# so we only look for the names.
differential_type = frame.differential_type
if differential_type is not None:
for frame_name, repr_name in frame.get_representation_component_names('s').items():
diff_attr_class = differential_type.attr_classes[repr_name]
value = kwargs.pop(frame_name, None)
if value is not None:
valid_kwargs[frame_name] = diff_attr_class(value)
return valid_kwargs
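# For example (hypothetical call; the values are validated through the
# frame's component classes, here Longitude and Latitude):
#
#     >>> from astropy.coordinates.builtin_frames import ICRS
#     >>> import astropy.units as u
#     >>> _get_representation_attrs(ICRS(), [u.deg, u.deg, None], {'ra': 1*u.deg, 'dec': 2*u.deg})
#     {'ra': <Longitude 1. deg>, 'dec': <Latitude 2. deg>}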
def _parse_ra_dec(coord_str):
"""
Parse RA and Dec values from a coordinate string. Currently the
following formats are supported:
* space separated 6-value format
* space separated format with fewer than 6 values; this requires a plus
or minus sign as the separator between RA and Dec
* sign separated format
* JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
* JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
Parameters
----------
coord_str : str
Coordinate string to parse.
Returns
-------
coord : tuple of str or list of str
Parsed coordinate values.
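Examples
--------
For instance, both the 6-value and the J-prefixed forms parse to the same
RA/Dec pair::

    >>> _parse_ra_dec('00 10 50.00 +20 37 12.00')
    ('00 10 50.00', '+20 37 12.00')
    >>> _parse_ra_dec('J001050.00+203712.00')
    ('00 10 50.00', '+20 37 12.00')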
"""
if isinstance(coord_str, str):
coord1 = coord_str.split()
else:
# This exception should never be raised from SkyCoord
raise TypeError('coord_str must be a single str')
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif len(coord1) > 2:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], ' '.join(coord[1:]))
elif len(coord1) == 1:
match_j = J_PREFIXED_RA_DEC_RE.match(coord_str)
if match_j:
coord = match_j.groups()
if len(coord[0].split('.')[0]) == 7:
coord = (f'{coord[0][0:3]} {coord[0][3:5]} {coord[0][5:]}',
f'{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}')
else:
coord = (f'{coord[0][0:2]} {coord[0][2:4]} {coord[0][4:]}',
f'{coord[1][0:3]} {coord[1][3:5]} {coord[1][5:]}')
else:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], ' '.join(coord[1:]))
else:
coord = coord1
return coord
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""
# Standard library
import copy
import inspect
from collections import namedtuple, defaultdict
import warnings
# Dependencies
import numpy as np
# Project
from astropy.utils.compat.misc import override__dir__
from astropy.utils.decorators import lazyproperty, format_doc
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
from astropy import units as u
from astropy.utils import ShapedLikeNDArray, check_broadcast
from .transformations import TransformGraph
from . import representation as r
from .angles import Angle
from .attributes import Attribute
__all__ = ['BaseCoordinateFrame', 'frame_transform_graph',
'GenericFrame', 'RepresentationMapping']
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
def _get_repr_cls(value):
"""
Return a valid representation class from ``value`` or raise exception.
"""
if value in r.REPRESENTATION_CLASSES:
value = r.REPRESENTATION_CLASSES[value]
elif (not isinstance(value, type) or
not issubclass(value, r.BaseRepresentation)):
raise ValueError(
'Representation is {!r} but must be a BaseRepresentation class '
'or one of the string aliases {}'.format(
value, list(r.REPRESENTATION_CLASSES)))
return value
def _get_diff_cls(value):
"""
Return a valid differential class from ``value`` or raise exception.
As originally created, this is only used in the SkyCoord initializer, so if
that is refactored, this function may no longer be necessary.
"""
if value in r.DIFFERENTIAL_CLASSES:
value = r.DIFFERENTIAL_CLASSES[value]
elif (not isinstance(value, type) or
not issubclass(value, r.BaseDifferential)):
raise ValueError(
'Differential is {!r} but must be a BaseDifferential class '
'or one of the string aliases {}'.format(
value, list(r.DIFFERENTIAL_CLASSES)))
return value
def _get_repr_classes(base, **differentials):
"""Get valid representation and differential classes.
Parameters
----------
base : str or `~astropy.coordinates.BaseRepresentation` subclass
class for the representation of the base coordinates. If a string,
it is looked up among the known representation classes.
**differentials : dict of str or `~astropy.coordinates.BaseDifferential`
Keys are like for normal differentials, i.e., 's' for a first
derivative in time, etc. If an item is set to `None`, it will be
guessed from the base class.
Returns
-------
repr_classes : dict of subclasses
The base class is keyed by 'base'; the others by the keys of
``differentials``.
"""
base = _get_repr_cls(base)
repr_classes = {'base': base}
for name, differential_type in differentials.items():
if differential_type == 'base':
# We don't want to fail for this case.
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None)
elif differential_type in r.DIFFERENTIAL_CLASSES:
differential_type = r.DIFFERENTIAL_CLASSES[differential_type]
elif (differential_type is not None
and (not isinstance(differential_type, type)
or not issubclass(differential_type, r.BaseDifferential))):
raise ValueError(
'Differential is {!r} but must be a BaseDifferential class '
'or one of the string aliases {}'.format(
differential_type, list(r.DIFFERENTIAL_CLASSES)))
repr_classes[name] = differential_type
return repr_classes
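# For example (a sketch; string aliases resolve to classes, and 'base' picks
# the differential matching the base representation):
#
#     >>> _get_repr_classes('spherical', s='base')  # doctest: +SKIP
#     {'base': SphericalRepresentation, 's': SphericalDifferential}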
_RepresentationMappingBase = \
namedtuple('RepresentationMapping',
('reprname', 'framename', 'defaultunit'))
class RepresentationMapping(_RepresentationMappingBase):
"""
This `~collections.namedtuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (which is degrees
for Angles, nothing otherwise), or None (to indicate that no unit mapping
should be done).
"""
def __new__(cls, reprname, framename, defaultunit='recommended'):
# this trick just provides some defaults
return super().__new__(cls, reprname, framename, defaultunit)
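# For example, a frame could map the representation's 'lon' component to a
# frame-specific name 'ra' with degrees as the default unit (a sketch;
# ``u`` is astropy.units):
#
#     >>> RepresentationMapping('lon', 'ra', u.deg)
#     RepresentationMapping(reprname='lon', framename='ra', defaultunit=Unit("deg"))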
base_doc = """{__doc__}
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` subclass instance
A representation object or ``None`` to have no data (or use the
coordinate component arguments, see below).
{components}
representation_type : `~astropy.coordinates.BaseRepresentation` subclass, str, optional
A representation class or string name of a representation class. This
sets the expected input representation class, thereby changing the
expected keyword arguments for the data passed in. For example, passing
``representation_type='cartesian'`` will make the classes expect
position data with cartesian names, i.e. ``x, y, z`` in most cases
unless overridden via ``frame_specific_representation_info``. To see this
frame's names, check out ``<this frame>().representation_info``.
differential_type : `~astropy.coordinates.BaseDifferential` subclass, str, dict, optional
A differential class or dictionary of differential classes (currently
only a velocity differential with key 's' is supported). This sets the
expected input differential class, thereby changing the expected keyword
arguments of the data passed in. For example, passing
``differential_type='cartesian'`` will make the classes expect velocity
data with the argument names ``v_x, v_y, v_z`` unless overridden via
``frame_specific_representation_info``. To see this frame's names,
check out ``<this frame>().representation_info``.
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
{footer}
"""
_components = """
*args, **kwargs
Coordinate components, with names that depend on the subclass.
"""
@format_doc(base_doc, components=_components, footer="")
class BaseCoordinateFrame(ShapedLikeNDArray):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `default_differential`
A subclass of `~astropy.coordinates.BaseDifferential` that will be
treated as the default differential class of this frame. This is the
differential class assumed by default when the frame is created.
* `~astropy.coordinates.Attribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
Unless overridden via `frame_specific_representation_info`, velocity name
defaults are:
* ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `SphericalCosLatDifferential`
proper motion components
* ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper motion
components
* ``radial_velocity`` for any ``d_distance`` component
* ``v_{x,y,z}`` for `CartesianDifferential` velocity components
where ``{lon}`` and ``{lat}`` are the frame names of the angular components.
"""
default_representation = None
default_differential = None
# Specifies special names and units for representation and differential
# attributes.
frame_specific_representation_info = {}
frame_attributes = {}
# Default empty frame_attributes dict
def __init_subclass__(cls, **kwargs):
# We first check for explicitly set values for these:
default_repr = getattr(cls, 'default_representation', None)
default_diff = getattr(cls, 'default_differential', None)
repr_info = getattr(cls, 'frame_specific_representation_info', None)
# Then, to make sure this works for subclasses-of-subclasses, we also
# have to check for cases where the attribute names have already been
# replaced by underscore-prefaced equivalents by the logic below:
if default_repr is None or isinstance(default_repr, property):
default_repr = getattr(cls, '_default_representation', None)
if default_diff is None or isinstance(default_diff, property):
default_diff = getattr(cls, '_default_differential', None)
if repr_info is None or isinstance(repr_info, property):
repr_info = getattr(cls, '_frame_specific_representation_info', None)
repr_info = cls._infer_repr_info(repr_info)
# Make read-only properties for the frame class attributes that should
# be read-only to make them immutable after creation.
# We copy attributes instead of linking to make sure there's no
# accidental cross-talk between classes
cls._create_readonly_property('default_representation', default_repr,
'Default representation for position data')
cls._create_readonly_property('default_differential', default_diff,
'Default representation for differential data '
'(e.g., velocity)')
cls._create_readonly_property('frame_specific_representation_info',
copy.deepcopy(repr_info),
'Mapping for frame-specific component names')
# Set the frame attributes. We first construct the attributes from
# superclasses, going in reverse order to keep insertion order,
# and then add any attributes from the frame now being defined
# (if any old definitions are overridden, this keeps the order).
# Note that we cannot simply start with the inherited frame_attributes
# since we could be a mixin between multiple coordinate frames.
# TODO: Should this be made to use readonly_prop_factory as well or
# would it be inconvenient for getting the frame_attributes from
# classes?
frame_attrs = {}
for basecls in reversed(cls.__bases__):
if issubclass(basecls, BaseCoordinateFrame):
frame_attrs.update(basecls.frame_attributes)
for k, v in cls.__dict__.items():
if isinstance(v, Attribute):
frame_attrs[k] = v
cls.frame_attributes = frame_attrs
# Deal with setting the name of the frame:
if not hasattr(cls, 'name'):
cls.name = cls.__name__.lower()
elif (BaseCoordinateFrame not in cls.__bases__ and
cls.name in [getattr(base, 'name', None)
for base in cls.__bases__]):
# This may be a subclass of a subclass of BaseCoordinateFrame,
# like ICRS(BaseRADecFrame). In this case, cls.name will have been
# set by init_subclass
cls.name = cls.__name__.lower()
# A cache that *must be unique to each frame class* - it is
# insufficient to share them with superclasses, hence the need to put
# them in the meta
cls._frame_class_cache = {}
super().__init_subclass__(**kwargs)
def __init__(self, *args, copy=True, representation_type=None,
differential_type=None, **kwargs):
self._attr_names_with_defaults = []
self._representation = self._infer_representation(representation_type, differential_type)
self._data = self._infer_data(args, copy, kwargs) # possibly None.
# Set frame attributes, if any
values = {}
for fnm, fdefault in self.get_frame_attr_names().items():
# Read-only frame attributes are defined as FrameAttribute
# descriptors which are not settable, so set 'real' attributes as
# the name prefaced with an underscore.
if fnm in kwargs:
value = kwargs.pop(fnm)
setattr(self, '_' + fnm, value)
# Validate attribute by getting it. If the instance has data,
# this also checks its shape is OK. If not, we do it below.
values[fnm] = getattr(self, fnm)
else:
setattr(self, '_' + fnm, fdefault)
self._attr_names_with_defaults.append(fnm)
if kwargs:
raise TypeError(
f'Coordinate frame {self.__class__.__name__} got unexpected '
f'keywords: {list(kwargs)}')
# We do ``is None`` because self._data might evaluate to false for
# empty arrays or data == 0
if self._data is None:
# No data: we still need to check that any non-scalar attributes
# have consistent shapes. Collect them for all attributes with
# size > 1 (which should be array-like and thus have a shape).
shapes = {fnm: value.shape for fnm, value in values.items()
if getattr(value, 'shape', ())}
if shapes:
if len(shapes) > 1:
try:
self._no_data_shape = check_broadcast(*shapes.values())
except ValueError as err:
raise ValueError(
f"non-scalar attributes with inconsistent shapes: {shapes}") from err
# Above, we checked that it is possible to broadcast all
# shapes. By getting and thus validating the attributes,
# we verify that the attributes can in fact be broadcast.
for fnm in shapes:
getattr(self, fnm)
else:
self._no_data_shape = shapes.popitem()[1]
else:
self._no_data_shape = ()
# The logic of this block is not related to the previous one
if self._data is not None:
# This makes the cache keys backwards-compatible, but also adds
# support for having differentials attached to the frame data
# representation object.
if 's' in self._data.differentials:
# TODO: assumes a velocity unit differential
key = (self._data.__class__.__name__,
self._data.differentials['s'].__class__.__name__,
False)
else:
key = (self._data.__class__.__name__, False)
# Set up representation cache.
self.cache['representation'][key] = self._data
def _infer_representation(self, representation_type, differential_type):
if representation_type is None and differential_type is None:
return {'base': self.default_representation, 's': self.default_differential}
if representation_type is None:
representation_type = self.default_representation
if (inspect.isclass(differential_type)
and issubclass(differential_type, r.BaseDifferential)):
# TODO: assumes the differential class is for the velocity
# differential
differential_type = {'s': differential_type}
elif isinstance(differential_type, str):
# TODO: assumes the differential class is for the velocity
# differential
diff_cls = r.DIFFERENTIAL_CLASSES[differential_type]
differential_type = {'s': diff_cls}
elif differential_type is None:
if representation_type == self.default_representation:
differential_type = {'s': self.default_differential}
else:
differential_type = {'s': 'base'} # see set_representation_cls()
return _get_repr_classes(representation_type, **differential_type)
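# Illustrative sketch, not part of astropy: how the classes resolved by
# _infer_representation surface through the public API. The `_demo_*`
# helper name is hypothetical.
def _demo_infer_representation():
    from astropy import units as u
    from astropy.coordinates import ICRS, CartesianRepresentation
    # A string representation_type is resolved to its class via the
    # registries consulted by _get_repr_classes().
    icrs = ICRS(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
                representation_type='cartesian')
    assert icrs.representation_type is CartesianRepresentation
    return icrs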
def _infer_data(self, args, copy, kwargs):
# if not set below, this is a frame with no data
representation_data = None
differential_data = None
args = list(args) # need to be able to pop them
if (len(args) > 0) and (isinstance(args[0], r.BaseRepresentation) or
args[0] is None):
representation_data = args.pop(0) # This can still be None
if len(args) > 0:
raise TypeError(
'Cannot create a frame with both a representation object '
'and other positional arguments')
if representation_data is not None:
diffs = representation_data.differentials
differential_data = diffs.get('s', None)
if ((differential_data is None and len(diffs) > 0) or
(differential_data is not None and len(diffs) > 1)):
raise ValueError('Multiple differentials are associated '
'with the representation object passed in '
'to the frame initializer. Only a single '
'velocity differential is supported. Got: '
'{}'.format(diffs))
else:
representation_cls = self.get_representation_cls()
# Get any representation data passed in to the frame initializer
# using keyword or positional arguments for the component names
repr_kwargs = {}
for nmkw, nmrep in self.representation_component_names.items():
if len(args) > 0:
# first gather up positional args
repr_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
repr_kwargs[nmrep] = kwargs.pop(nmkw)
# special-case the Spherical->UnitSpherical if no `distance`
if repr_kwargs:
# TODO: determine how to get rid of the part before the "try" -
# currently removing it has a performance regression for
# unitspherical because of the try-related overhead.
# Also frames have no way to indicate what the "distance" is
if repr_kwargs.get('distance', True) is None:
del repr_kwargs['distance']
if (issubclass(representation_cls,
r.SphericalRepresentation)
and 'distance' not in repr_kwargs):
representation_cls = representation_cls._unit_representation
try:
representation_data = representation_cls(copy=copy,
**repr_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
try:
representation_data = (
representation_cls._unit_representation(
copy=copy, **repr_kwargs))
except Exception:
msg = str(e)
names = self.get_representation_component_names()
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace('__init__()',
f'{self.__class__.__name__}()')
e.args = (msg,)
raise e
# Now we handle the Differential data:
# Get any differential data passed in to the frame initializer
# using keyword or positional arguments for the component names
differential_cls = self.get_representation_cls('s')
diff_component_names = self.get_representation_component_names('s')
diff_kwargs = {}
for nmkw, nmrep in diff_component_names.items():
if len(args) > 0:
# first gather up positional args
diff_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
diff_kwargs[nmrep] = kwargs.pop(nmkw)
if diff_kwargs:
if (hasattr(differential_cls, '_unit_differential')
and 'd_distance' not in diff_kwargs):
differential_cls = differential_cls._unit_differential
elif len(diff_kwargs) == 1 and 'd_distance' in diff_kwargs:
differential_cls = r.RadialDifferential
try:
differential_data = differential_cls(copy=copy,
**diff_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
msg = str(e)
names = self.get_representation_component_names('s')
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace('__init__()',
f'{self.__class__.__name__}()')
e.args = (msg,)
raise
if len(args) > 0:
raise TypeError(
'{}.__init__ had {} remaining unhandled arguments'.format(
self.__class__.__name__, len(args)))
if representation_data is None and differential_data is not None:
raise ValueError("Cannot pass in differential component data "
"without positional (representation) data.")
if differential_data:
# Check that differential data provided has units compatible
# with time-derivative of representation data.
# NOTE: there is no dimensionless time while lengths can be
# dimensionless (u.dimensionless_unscaled).
for comp in representation_data.components:
if (diff_comp := f'd_{comp}') in differential_data.components:
current_repr_unit = representation_data._units[comp]
current_diff_unit = differential_data._units[diff_comp]
expected_unit = current_repr_unit / u.s
if not current_diff_unit.is_equivalent(expected_unit):
for key, val in self.get_representation_component_names().items():
if val == comp:
current_repr_name = key
break
for key, val in self.get_representation_component_names('s').items():
if val == diff_comp:
current_diff_name = key
break
raise ValueError(
f'{current_repr_name} has unit "{current_repr_unit}" with physical '
f'type "{current_repr_unit.physical_type}", but {current_diff_name} '
f'has incompatible unit "{current_diff_unit}" with physical type '
f'"{current_diff_unit.physical_type}" instead of the expected '
f'"{(expected_unit).physical_type}".')
representation_data = representation_data.with_differentials({'s': differential_data})
return representation_data
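# Illustrative sketch, not part of astropy: the unit-compatibility check
# above requires each d_<comp> to be convertible to <comp unit>/s. The
# `_demo_*` helper name is hypothetical.
def _demo_velocity_unit_check():
    from astropy import units as u
    from astropy.coordinates import ICRS
    # km/s is equivalent to kpc/s, so this passes the check; passing,
    # e.g., radial_velocity=30 * u.km (no /s) would raise ValueError.
    return ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.kpc,
                pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=2 * u.mas / u.yr,
                radial_velocity=30 * u.km / u.s)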
@classmethod
def _infer_repr_info(cls, repr_info):
# Unless overridden via `frame_specific_representation_info`, velocity
# name defaults are (see also docstring for BaseCoordinateFrame):
# * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for
# `SphericalCosLatDifferential` proper motion components
# * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper
# motion components
# * ``radial_velocity`` for any `d_distance` component
# * ``v_{x,y,z}`` for `CartesianDifferential` velocity components
# where `{lon}` and `{lat}` are the frame names of the angular
# components.
if repr_info is None:
repr_info = {}
# the tuple() call below is necessary because if it is not there,
# the iteration proceeds in a difficult-to-predict manner in the
# case that one of the class objects' hashes is such that it gets
# revisited by the iteration. The tuple() call prevents this by
# making the items iterated over fixed regardless of how the dict
# changes
for cls_or_name in tuple(repr_info.keys()):
if isinstance(cls_or_name, str):
# TODO: this provides a layer of backwards compatibility in
# case the key is a string, but now we want explicit classes.
_cls = _get_repr_cls(cls_or_name)
repr_info[_cls] = repr_info.pop(cls_or_name)
# The default spherical names are 'lon' and 'lat'
repr_info.setdefault(r.SphericalRepresentation,
[RepresentationMapping('lon', 'lon'),
RepresentationMapping('lat', 'lat')])
sph_component_map = {m.reprname: m.framename
for m in repr_info[r.SphericalRepresentation]}
repr_info.setdefault(r.SphericalCosLatDifferential, [
RepresentationMapping(
'd_lon_coslat',
'pm_{lon}_cos{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_lat',
'pm_{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_distance', 'radial_velocity',
u.km/u.s)
])
repr_info.setdefault(r.SphericalDifferential, [
RepresentationMapping('d_lon',
'pm_{lon}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_lat',
'pm_{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_distance', 'radial_velocity',
u.km/u.s)
])
repr_info.setdefault(r.CartesianDifferential, [
RepresentationMapping('d_x', 'v_x', u.km/u.s),
RepresentationMapping('d_y', 'v_y', u.km/u.s),
RepresentationMapping('d_z', 'v_z', u.km/u.s)])
# Unit* classes should follow the same naming conventions
# TODO: this adds some unnecessary mappings for the Unit classes, so
# this could be cleaned up, but in practice doesn't seem to have any
# negative side effects
repr_info.setdefault(r.UnitSphericalRepresentation,
repr_info[r.SphericalRepresentation])
repr_info.setdefault(r.UnitSphericalCosLatDifferential,
repr_info[r.SphericalCosLatDifferential])
repr_info.setdefault(r.UnitSphericalDifferential,
repr_info[r.SphericalDifferential])
return repr_info
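# Illustrative sketch, not part of astropy: inspecting the velocity-name
# defaults constructed above through a built-in frame. The `_demo_*`
# helper name is hypothetical.
def _demo_repr_info_defaults():
    import astropy.coordinates.representation as r
    from astropy.coordinates import ICRS
    info = ICRS().frame_specific_representation_info
    # For ICRS, lon/lat map to ra/dec, so the CosLat differential names
    # default to pm_ra_cosdec and pm_dec per the templates above.
    names = [m.framename for m in info[r.SphericalCosLatDifferential]]
    assert 'pm_ra_cosdec' in names and 'pm_dec' in names
    return names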
@classmethod
def _create_readonly_property(cls, attr_name, value, doc=None):
private_attr = '_' + attr_name
def getter(self):
return getattr(self, private_attr)
setattr(cls, private_attr, value)
setattr(cls, attr_name, property(getter, doc=doc))
@lazyproperty
def cache(self):
"""
Cache for this frame, a dict. It stores anything that should be
computed from the coordinate data (*not* from the frame attributes).
This can be used in functions to store anything that might be
expensive to compute but might be re-used by some other function.
E.g.::
if 'user_data' in myframe.cache:
data = myframe.cache['user_data']
else:
myframe.cache['user_data'] = data = expensive_func(myframe.lat)
If in-place modifications are made to the frame data, the cache should
be cleared::
myframe.cache.clear()
"""
return defaultdict(dict)
@property
def data(self):
"""
The coordinate data for this object. If this frame has no data, a
`ValueError` will be raised. Use `has_data` to
check if data is present on this frame object.
"""
if self._data is None:
raise ValueError('The frame object "{!r}" does not have '
'associated data'.format(self))
return self._data
@property
def has_data(self):
"""
True if this frame has `data`, False otherwise.
"""
return self._data is not None
@property
def shape(self):
return self.data.shape if self.has_data else self._no_data_shape
# We have to override the ShapedLikeNDArray definitions, since our shape
# does not have to be that of the data.
def __len__(self):
return len(self.data)
def __bool__(self):
return self.has_data and self.size > 0
@property
def size(self):
return self.data.size
@property
def isscalar(self):
return self.has_data and self.data.isscalar
@classmethod
def get_frame_attr_names(cls):
return {name: getattr(cls, name)
for name in cls.frame_attributes}
def get_representation_cls(self, which='base'):
"""The class used for part of this frame's data.
Parameters
----------
which : ('base', 's', `None`)
The class of which part to return. 'base' means the class used to
represent the coordinates; 's' the first derivative with respect to time, i.e.,
the class representing the proper motion and/or radial velocity.
If `None`, return a dict with both.
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
"""
if which is not None:
return self._representation[which]
else:
return self._representation
def set_representation_cls(self, base=None, s='base'):
"""Set representation and/or differential class for this frame's data.
Parameters
----------
base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
The name or subclass to use to represent the coordinate data.
s : `~astropy.coordinates.BaseDifferential` subclass, optional
The differential subclass to use to represent any velocities,
such as proper motion and radial velocity. If equal to 'base',
which is the default, it will be inferred from the representation.
If `None`, the representation will drop any differentials.
"""
if base is None:
base = self._representation['base']
self._representation = _get_repr_classes(base=base, s=s)
representation_type = property(
fget=get_representation_cls, fset=set_representation_cls,
doc="""The representation class used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
Can also be *set* using the string name of the representation. If you
wish to set an explicit differential class (rather than have it be
inferred), use the ``set_representation_cls`` method.
""")
@property
def differential_type(self):
"""
The differential used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseDifferential`.
For simultaneous setting of representation and differentials, see the
``set_representation_cls`` method.
"""
return self.get_representation_cls('s')
@differential_type.setter
def differential_type(self, value):
self.set_representation_cls(s=value)
@classmethod
def _get_representation_info(cls):
# This exists as a class method only to support handling frame inputs
# without units, which are deprecated and will be removed. This can be
# moved into the representation_info property at that time.
# note that if so moved, the cache should be accessed as
# self.__class__._frame_class_cache
if cls._frame_class_cache.get('last_reprdiff_hash', None) != r.get_reprdiff_cls_hash():
repr_attrs = {}
for repr_diff_cls in (list(r.REPRESENTATION_CLASSES.values()) +
list(r.DIFFERENTIAL_CLASSES.values())):
repr_attrs[repr_diff_cls] = {'names': [], 'units': []}
for c, c_cls in repr_diff_cls.attr_classes.items():
repr_attrs[repr_diff_cls]['names'].append(c)
rec_unit = u.deg if issubclass(c_cls, Angle) else None
repr_attrs[repr_diff_cls]['units'].append(rec_unit)
for repr_diff_cls, mappings in cls._frame_specific_representation_info.items():
# take the 'names' and 'units' tuples from repr_attrs,
# and then use the RepresentationMapping objects
# to update as needed for this frame.
nms = repr_attrs[repr_diff_cls]['names']
uns = repr_attrs[repr_diff_cls]['units']
comptomap = {m.reprname: m for m in mappings}
for i, c in enumerate(repr_diff_cls.attr_classes.keys()):
if c in comptomap:
mapp = comptomap[c]
nms[i] = mapp.framename
# need the isinstance because otherwise if it's a unit it
# will try to compare to the unit string representation
if not (isinstance(mapp.defaultunit, str)
and mapp.defaultunit == 'recommended'):
uns[i] = mapp.defaultunit
# else we just leave it as recommended_units says above
# Convert to tuples so that this can't mess with frame internals
repr_attrs[repr_diff_cls]['names'] = tuple(nms)
repr_attrs[repr_diff_cls]['units'] = tuple(uns)
cls._frame_class_cache['representation_info'] = repr_attrs
cls._frame_class_cache['last_reprdiff_hash'] = r.get_reprdiff_cls_hash()
return cls._frame_class_cache['representation_info']
@lazyproperty
def representation_info(self):
"""
A dictionary with the information of what attribute names for this frame
apply to particular representations.
"""
return self._get_representation_info()
def get_representation_component_names(self, which='base'):
out = {}
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
data_names = repr_or_diff_cls.attr_classes.keys()
repr_names = self.representation_info[repr_or_diff_cls]['names']
for repr_name, data_name in zip(repr_names, data_names):
out[repr_name] = data_name
return out
def get_representation_component_units(self, which='base'):
out = {}
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
repr_attrs = self.representation_info[repr_or_diff_cls]
repr_names = repr_attrs['names']
repr_units = repr_attrs['units']
for repr_name, repr_unit in zip(repr_names, repr_units):
if repr_unit:
out[repr_name] = repr_unit
return out
representation_component_names = property(get_representation_component_names)
representation_component_units = property(get_representation_component_units)
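# Illustrative sketch, not part of astropy: the two mappings exposed by
# the properties above. The `_demo_*` helper name is hypothetical.
def _demo_component_names_and_units():
    from astropy import units as u
    from astropy.coordinates import ICRS
    frame = ICRS()
    # frame-specific name -> representation component name
    assert frame.representation_component_names['ra'] == 'lon'
    # frame-specific name -> preferred unit (only where one is declared)
    assert frame.representation_component_units['ra'] == u.deg
    return frame.representation_component_names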
def _replicate(self, data, copy=False, **kwargs):
"""Base for replicating a frame, with possibly different attributes.
Produces a new instance of the frame using the attributes of the old
frame (unless overridden) and with the data given.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` or None
Data to use in the new frame instance. If `None`, it will be
a data-less frame.
copy : bool, optional
Whether data and the attributes on the old frame should be copied
(default), or passed on by reference.
**kwargs
Any attributes that should be overridden.
"""
# This is to provide a slightly nicer error message if the user tries
# to use frame_obj.representation instead of frame_obj.data to get the
# underlying representation object [e.g., #2890]
if inspect.isclass(data):
raise TypeError('Class passed as data instead of a representation '
'instance. If you called frame.representation, this'
' returns the representation class. frame.data '
'returns the instantiated object - you may want to '
'use this instead.')
if copy and data is not None:
data = data.copy()
for attr in self.get_frame_attr_names():
if (attr not in self._attr_names_with_defaults
and attr not in kwargs):
value = getattr(self, attr)
if copy:
value = value.copy()
kwargs[attr] = value
return self.__class__(data, copy=False, **kwargs)
def replicate(self, copy=False, **kwargs):
"""
Return a replica of the frame, optionally with new frame attributes.
The replica is a new frame object that has the same data as this frame
object and with frame attributes overridden if they are provided as extra
keyword arguments to this method. If ``copy`` is set to `True` then a
copy of the internal arrays will be made. Otherwise the replica will
use a reference to the original arrays when possible to save memory. The
internal arrays are normally not changeable by the user so in most cases
it should not be necessary to set ``copy`` to `True`.
Parameters
----------
copy : bool, optional
If True, the resulting object is a copy of the data. When False,
references are used where possible. This rule also applies to the
frame attributes.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : `BaseCoordinateFrame` subclass instance
Replica of this object, but possibly with new frame attributes.
"""
return self._replicate(self.data, copy=copy, **kwargs)
def replicate_without_data(self, copy=False, **kwargs):
"""
Return a replica without data, optionally with new frame attributes.
The replica is a new frame object without data but with the same frame
attributes as this object, except where overridden by extra keyword
arguments to this method. The ``copy`` keyword determines if the frame
attributes are truly copied vs being references (which saves memory for
cases where frame attributes are large).
This method is essentially the converse of `realize_frame`.
Parameters
----------
copy : bool, optional
If True, the resulting object has copies of the frame attributes.
When False, references are used where possible.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : `BaseCoordinateFrame` subclass instance
Replica of this object, but without data and possibly with new frame
attributes.
"""
return self._replicate(None, copy=copy, **kwargs)
def realize_frame(self, data, **kwargs):
"""
Generates a new frame with new data from another frame (which may or
may not have data). Roughly speaking, the converse of
`replicate_without_data`.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation`
The representation to use as the data for the new frame.
**kwargs
Any additional keywords are treated as frame attributes to be set on the
new frame object. In particular, ``representation_type`` can be specified.
Returns
-------
frameobj : `BaseCoordinateFrame` subclass instance
A new object in *this* frame, with the same frame attributes as
this one, but with the ``data`` as the coordinate data.
"""
return self._replicate(data, **kwargs)
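# Illustrative sketch, not part of astropy: replicate() vs.
# replicate_without_data()/realize_frame(). The `_demo_*` helper name is
# hypothetical.
def _demo_replicate_and_realize():
    from astropy import units as u
    from astropy.coordinates import FK5
    fk5 = FK5(ra=10 * u.deg, dec=20 * u.deg, equinox='J2010')
    fk5_alt = fk5.replicate(equinox='J2000')  # same data, new attribute
    back = fk5.replicate_without_data().realize_frame(fk5.data)
    return fk5_alt, back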
def represent_as(self, base, s='base', in_frame_units=False):
"""
Generate and return a new representation of this frame's `data`
as a Representation object.
Note: In order to make an in-place change of the representation
of a Frame or SkyCoord object, set the ``representation``
attribute of that object to the desired new representation, or
use the ``set_representation_cls`` method to also set the differential.
Parameters
----------
base : subclass of BaseRepresentation or string
The type of representation to generate. Must be a *class*
(not an instance), or the string name of the representation
class.
s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
Class in which any velocities should be represented. Must be
a *class* (not an instance), or the string name of the
differential class. If equal to 'base' (default), inferred from
the base class. If `None`, all velocity information is dropped.
in_frame_units : bool, keyword-only
Force the representation units to match the specified units
particular to this frame
Returns
-------
newrep : BaseRepresentation-derived object
A new representation object of this frame's `data`.
Raises
------
AttributeError
If this object had no `data`
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> coord = SkyCoord(0*u.deg, 0*u.deg)
>>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP
<CartesianRepresentation (x, y, z) [dimensionless]
(1., 0., 0.)>
>>> coord.representation_type = CartesianRepresentation
>>> coord # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (x, y, z) [dimensionless]
(1., 0., 0.)>
"""
# For backwards compatibility (because in_frame_units used to be the
# second argument), we check to see if `s` is a boolean. If it is, we
# ignore its value and warn about the positional change.
if isinstance(s, bool):
warnings.warn("The argument position for `in_frame_units` in "
"`represent_as` has changed. Use as a keyword "
"argument if needed.", AstropyWarning)
in_frame_units = s
s = 'base'
# In the future, we may want to support more differentials, in which
# case one probably needs to define **kwargs above and use it here.
# But for now, we only care about the velocity.
repr_classes = _get_repr_classes(base=base, s=s)
representation_cls = repr_classes['base']
# We only keep velocity information
if 's' in self.data.differentials:
# For the default 'base' option in which _get_repr_classes has
# given us a best guess based on the representation class, we only
# use it if the class we had already is incompatible.
if (s == 'base'
and (self.data.differentials['s'].__class__
in representation_cls._compatible_differentials)):
differential_cls = self.data.differentials['s'].__class__
else:
differential_cls = repr_classes['s']
elif s is None or s == 'base':
differential_cls = None
else:
raise TypeError('Frame data has no associated differentials '
'(i.e. the frame has no velocity data) - '
'represent_as() only accepts a new '
'representation.')
if differential_cls:
cache_key = (representation_cls.__name__,
differential_cls.__name__, in_frame_units)
else:
cache_key = (representation_cls.__name__, in_frame_units)
cached_repr = self.cache['representation'].get(cache_key)
if not cached_repr:
if differential_cls:
# Sanity check to ensure we do not just drop radial
# velocity. TODO: should Representation.represent_as
# allow this transformation in the first place?
if (isinstance(self.data, r.UnitSphericalRepresentation)
and issubclass(representation_cls, r.CartesianRepresentation)
and not isinstance(self.data.differentials['s'],
(r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential))):
raise u.UnitConversionError(
'need a distance to retrieve a cartesian representation '
'when both radial velocity and proper motion are present, '
'since otherwise the units cannot match.')
# TODO NOTE: only supports a single differential
data = self.data.represent_as(representation_cls,
differential_cls)
diff = data.differentials['s'] # TODO: assumes velocity
else:
data = self.data.represent_as(representation_cls)
# If the new representation is known to this frame and has a defined
# set of names and units, then use that.
new_attrs = self.representation_info.get(representation_cls)
if new_attrs and in_frame_units:
datakwargs = {comp: getattr(data, comp) for comp in data.components}
for comp, new_attr_unit in zip(data.components, new_attrs['units']):
if new_attr_unit:
datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
data = data.__class__(copy=False, **datakwargs)
if differential_cls:
# the original differential
data_diff = self.data.differentials['s']
# If the new differential is known to this frame and has a
# defined set of names and units, then use that.
new_attrs = self.representation_info.get(differential_cls)
if new_attrs and in_frame_units:
diffkwargs = {comp: getattr(diff, comp) for comp in diff.components}
for comp, new_attr_unit in zip(diff.components,
new_attrs['units']):
# Some special-casing to treat a situation where the
# input data has a UnitSphericalDifferential or a
# RadialDifferential. It is re-represented to the
# frame's differential class (which might be, e.g., a
# dimensional Differential), so we don't want to try to
# convert the empty component units
if (isinstance(data_diff,
(r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential))
and comp not in data_diff.__class__.attr_classes):
continue
elif (isinstance(data_diff, r.RadialDifferential)
and comp not in data_diff.__class__.attr_classes):
continue
# Try to convert to requested units. Since that might
# not be possible (e.g., for a coordinate with proper
# motion but without distance, one cannot convert to a
# cartesian differential in km/s), we allow the unit
# conversion to fail. See gh-7028 for discussion.
if new_attr_unit and hasattr(diff, comp):
try:
diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit)
except Exception:
pass
diff = diff.__class__(copy=False, **diffkwargs)
# Here we have to bypass using with_differentials() because
# it has a validation check. But because
# .representation_type and .differential_type don't point to
# the original classes, if the input differential is a
# RadialDifferential, it usually gets turned into a
# SphericalCosLatDifferential (or whatever the default is)
# with strange units for the d_lon and d_lat attributes.
# This then causes the dictionary key check to fail (i.e.
# comparison against `diff._get_deriv_key()`)
data._differentials.update({'s': diff})
self.cache['representation'][cache_key] = data
return self.cache['representation'][cache_key]
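# Illustrative sketch, not part of astropy: in_frame_units converts the
# new representation's components to this frame's preferred units. The
# `_demo_*` helper name is hypothetical.
def _demo_represent_as_in_frame_units():
    from astropy import units as u
    from astropy.coordinates import ICRS
    c = ICRS(ra=0.5 * u.rad, dec=0.25 * u.rad)
    sph = c.represent_as('spherical', in_frame_units=True)
    assert sph.lon.unit == u.deg  # converted from radians
    return sph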
def transform_to(self, new_frame):
"""
Transform this object's coordinate data to a new frame.
Parameters
----------
new_frame : coordinate-like or `BaseCoordinateFrame` subclass instance
The frame to transform this coordinate frame into.
The frame class option is deprecated.
Returns
-------
transframe : coordinate-like
A new object with the coordinate data represented in the
``new_frame`` system.
Raises
------
ValueError
If there is no possible transformation route.
"""
from .errors import ConvertError
if self._data is None:
raise ValueError('Cannot transform a frame with no data')
if (getattr(self.data, 'differentials', None)
and hasattr(self, 'obstime') and hasattr(new_frame, 'obstime')
and np.any(self.obstime != new_frame.obstime)):
raise NotImplementedError('You cannot transform a frame that has '
'velocities to another frame at a '
'different obstime. If you think this '
'should (or should not) be possible, '
'please comment at https://github.com/astropy/astropy/issues/6280')
if inspect.isclass(new_frame):
warnings.warn("Transforming a frame instance to a frame class (as opposed to another "
"frame instance) will not be supported in the future. Either "
"explicitly instantiate the target frame, or first convert the source "
"frame instance to a `astropy.coordinates.SkyCoord` and use its "
"`transform_to()` method.",
AstropyDeprecationWarning)
# Use the default frame attributes for this class
new_frame = new_frame()
if hasattr(new_frame, '_sky_coord_frame'):
# Input new_frame is not a frame instance or class and is most
# likely a SkyCoord object.
new_frame = new_frame._sky_coord_frame
trans = frame_transform_graph.get_transform(self.__class__,
new_frame.__class__)
if trans is None:
if new_frame is self.__class__:
# no special transform needed, but should update frame info
return new_frame.realize_frame(self.data)
msg = 'Cannot transform from {0} to {1}'
raise ConvertError(msg.format(self.__class__, new_frame.__class__))
return trans(self, new_frame)
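# Illustrative sketch, not part of astropy: transforming to a frame
# *instance*, as the deprecation warning above recommends. The `_demo_*`
# helper name is hypothetical.
def _demo_transform_to():
    from astropy import units as u
    from astropy.coordinates import ICRS, Galactic
    icrs = ICRS(ra=10 * u.deg, dec=20 * u.deg)
    gal = icrs.transform_to(Galactic())
    return gal.l, gal.b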
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : `BaseCoordinateFrame` subclass or instance
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__
trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls)
if trans is None:
if new_frame_cls is self.__class__:
return 'same'
else:
return False
else:
return True
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
@staticmethod
def _frameattr_equiv(left_fattr, right_fattr):
"""
Determine if two frame attributes are equivalent. Implemented as a
staticmethod mainly as a convenient location, although conceivably it
might be desirable for subclasses to override this behavior.
Primary purpose is to check for equality of representations. This
aspect can actually be simplified/removed now that representations have
equality defined.
Secondary purpose is to check for equality of coordinate attributes,
which first checks whether they themselves are in equivalent frames
before checking for equality in the normal fashion. This is because
checking for equality with non-equivalent frames raises an error.
"""
if left_fattr is right_fattr:
# shortcut if it's exactly the same object
return True
elif left_fattr is None or right_fattr is None:
# shortcut if one attribute is unspecified and the other isn't
return False
left_is_repr = isinstance(left_fattr, r.BaseRepresentationOrDifferential)
right_is_repr = isinstance(right_fattr, r.BaseRepresentationOrDifferential)
if left_is_repr and right_is_repr:
# both are representations.
if (getattr(left_fattr, 'differentials', False) or
getattr(right_fattr, 'differentials', False)):
warnings.warn('Two representation frame attributes were '
'checked for equivalence when at least one of'
' them has differentials. This yields False '
'even if the underlying representations are '
'equivalent (although this may change in '
'future versions of Astropy)', AstropyWarning)
return False
if isinstance(right_fattr, left_fattr.__class__):
# if same representation type, compare components.
return np.all([(getattr(left_fattr, comp) ==
getattr(right_fattr, comp))
for comp in left_fattr.components])
else:
# convert to cartesian and see if they match
return np.all(left_fattr.to_cartesian().xyz ==
right_fattr.to_cartesian().xyz)
elif left_is_repr or right_is_repr:
return False
left_is_coord = isinstance(left_fattr, BaseCoordinateFrame)
right_is_coord = isinstance(right_fattr, BaseCoordinateFrame)
if left_is_coord and right_is_coord:
# both are coordinates
if left_fattr.is_equivalent_frame(right_fattr):
return np.all(left_fattr == right_fattr)
else:
return False
elif left_is_coord or right_is_coord:
return False
return np.all(left_fattr == right_fattr)
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : :class:`~astropy.coordinates.BaseCoordinateFrame`
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `BaseCoordinateFrame` or subclass.
"""
if self.__class__ == other.__class__:
for frame_attr_name in self.get_frame_attr_names():
if not self._frameattr_equiv(getattr(self, frame_attr_name),
getattr(other, frame_attr_name)):
return False
return True
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't a frame")
else:
return False
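# Illustrative sketch, not part of astropy: equivalence compares the
# class and frame attributes, never the data. The `_demo_*` helper name
# is hypothetical.
def _demo_is_equivalent_frame():
    from astropy.coordinates import FK5
    assert FK5(equinox='J2000').is_equivalent_frame(FK5(equinox='J2000'))
    assert not FK5(equinox='J2000').is_equivalent_frame(FK5(equinox='J2010'))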
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = f' ({frameattrs})'
if data_repr:
return f'<{self.__class__.__name__} Coordinate{frameattrs}: {data_repr}>'
else:
return f'<{self.__class__.__name__} Frame{frameattrs}>'
def _data_repr(self):
"""Returns a string representation of the coordinate data."""
if not self.has_data:
return ''
if self.representation_type:
if (hasattr(self.representation_type, '_unit_representation')
and isinstance(self.data,
self.representation_type._unit_representation)):
rep_cls = self.data.__class__
else:
rep_cls = self.representation_type
if 's' in self.data.differentials:
dif_cls = self.get_representation_cls('s')
dif_data = self.data.differentials['s']
if isinstance(dif_data, (r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential)):
dif_cls = dif_data.__class__
else:
dif_cls = None
data = self.represent_as(rep_cls, dif_cls, in_frame_units=True)
data_repr = repr(data)
# Generate the list of component names out of the repr string
part1, _, remainder = data_repr.partition('(')
if remainder != '':
comp_str, _, part2 = remainder.partition(')')
comp_names = comp_str.split(', ')
# Swap in frame-specific component names
invnames = {nmrepr: nmpref
for nmpref, nmrepr in self.representation_component_names.items()}
for i, name in enumerate(comp_names):
comp_names[i] = invnames.get(name, name)
# Reassemble the repr string
data_repr = part1 + '(' + ', '.join(comp_names) + ')' + part2
else:
data = self.data
data_repr = repr(self.data)
if data_repr.startswith('<' + data.__class__.__name__):
# remove both the leading "<" and the space after the name, as well
# as the trailing ">"
data_repr = data_repr[(len(data.__class__.__name__) + 2):-1]
else:
data_repr = 'Data:\n' + data_repr
if 's' in self.data.differentials:
data_repr_spl = data_repr.split('\n')
if 'has differentials' in data_repr_spl[-1]:
diffrepr = repr(data.differentials['s']).split('\n')
if diffrepr[0].startswith('<'):
diffrepr[0] = ' ' + ' '.join(diffrepr[0].split(' ')[1:])
for frm_nm, rep_nm in self.get_representation_component_names('s').items():
diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm)
if diffrepr[-1].endswith('>'):
diffrepr[-1] = diffrepr[-1][:-1]
data_repr_spl[-1] = '\n'.join(diffrepr)
data_repr = '\n'.join(data_repr_spl)
return data_repr
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
attr_strs = []
for attribute_name in self.get_frame_attr_names():
attr = getattr(self, attribute_name)
# Check to see if this object has a way of representing itself
# specific to being an attribute of a frame. (Note, this is not the
# Attribute class, it's the actual object).
if hasattr(attr, "_astropy_repr_in_frame"):
attrstr = attr._astropy_repr_in_frame()
else:
attrstr = str(attr)
attr_strs.append(f"{attribute_name}={attrstr}")
return ', '.join(attr_strs)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
new = super().__new__(self.__class__)
if hasattr(self, '_representation'):
new._representation = self._representation.copy()
new._attr_names_with_defaults = self._attr_names_with_defaults.copy()
for attr in self.frame_attributes:
_attr = '_' + attr
if attr in self._attr_names_with_defaults:
setattr(new, _attr, getattr(self, _attr))
else:
value = getattr(self, _attr)
if getattr(value, 'shape', ()):
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, _attr, value)
if self.has_data:
new._data = apply_method(self.data)
else:
new._data = None
shapes = [getattr(new, '_' + attr).shape
for attr in new.frame_attributes
if (attr not in new._attr_names_with_defaults
and getattr(getattr(new, '_' + attr), 'shape', ()))]
if shapes:
new._no_data_shape = (check_broadcast(*shapes)
if len(shapes) > 1 else shapes[0])
else:
new._no_data_shape = ()
return new
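# Illustrative sketch, not part of astropy: _apply is what powers the
# ShapedLikeNDArray machinery such as reshape and slicing. The `_demo_*`
# helper name is hypothetical.
def _demo_shape_methods():
    import numpy as np
    from astropy import units as u
    from astropy.coordinates import ICRS
    c = ICRS(ra=np.arange(6) * u.deg, dec=np.zeros(6) * u.deg)
    return c.reshape(2, 3)[0]  # an ICRS of shape (3,)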
def __setitem__(self, item, value):
if self.__class__ is not value.__class__:
raise TypeError(f'can only set from object of same class: '
f'{self.__class__.__name__} vs. '
f'{value.__class__.__name__}')
if not self.is_equivalent_frame(value):
raise ValueError('can only set frame item from an equivalent frame')
if value._data is None:
raise ValueError('can only set frame with value that has data')
if self._data is None:
raise ValueError('cannot set frame which has no data')
if self.shape == ():
raise TypeError(f"scalar '{self.__class__.__name__}' frame object "
f"does not support item assignment")
if self._data.__class__ is not value._data.__class__:
raise TypeError(f'can only set from object of same class: '
f'{self._data.__class__.__name__} vs. '
f'{value._data.__class__.__name__}')
if self._data._differentials:
# Can this ever occur? (Same class but different differential keys).
# This exception is not tested since it is not clear how to generate it.
if self._data._differentials.keys() != value._data._differentials.keys():
raise ValueError('setitem value must have the same differentials')
for key, self_diff in self._data._differentials.items():
if self_diff.__class__ is not value._data._differentials[key].__class__:
raise TypeError(f'can only set from object of same class: '
f'{self_diff.__class__.__name__} vs. '
f'{value._data._differentials[key].__class__.__name__}')
# Set representation data
self._data[item] = value._data
# Frame attributes required to be identical by is_equivalent_frame,
# no need to set them here.
self.cache.clear()
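# Illustrative sketch, not part of astropy: item assignment requires an
# equivalent frame with matching representation classes. The `_demo_*`
# helper name is hypothetical.
def _demo_setitem():
    import numpy as np
    from astropy import units as u
    from astropy.coordinates import ICRS
    c = ICRS(ra=np.arange(3.0) * u.deg, dec=np.zeros(3) * u.deg)
    c[0] = ICRS(ra=90 * u.deg, dec=45 * u.deg)
    return c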
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
dir_values = set(self.representation_component_names)
dir_values |= set(self.get_representation_component_names('s'))
return dir_values
def __getattr__(self, attr):
"""
Allow access to attributes on the representation and differential as
found via ``self.get_representation_component_names``.
TODO: We should handle dynamic representation transforms here (e.g.,
`.cylindrical`) instead of defining properties as below.
"""
# attr == '_representation' is likely from the hasattr() test in the
# representation property which is used for
# self.representation_component_names.
#
# Prevent infinite recursion here.
if attr.startswith('_'):
return self.__getattribute__(attr) # Raise AttributeError.
repr_names = self.representation_component_names
if attr in repr_names:
if self._data is None:
self.data # this raises the "no data" error by design - doing it
# this way means we don't have to replicate the error message here
rep = self.represent_as(self.representation_type,
in_frame_units=True)
val = getattr(rep, repr_names[attr])
return val
diff_names = self.get_representation_component_names('s')
if attr in diff_names:
if self._data is None:
self.data # see above.
# TODO: this doesn't work for the case when there is only
# unitspherical information. The differential_type gets set to the
# default_differential, which expects full information, so the
# units don't work out
rep = self.represent_as(in_frame_units=True,
**self.get_representation_cls(None))
val = getattr(rep.differentials['s'], diff_names[attr])
return val
return self.__getattribute__(attr) # Raise AttributeError.
def __setattr__(self, attr, value):
# Don't slow down access of private attributes!
if not attr.startswith('_'):
if hasattr(self, 'representation_info'):
repr_attr_names = set()
for representation_attr in self.representation_info.values():
repr_attr_names.update(representation_attr['names'])
if attr in repr_attr_names:
raise AttributeError(
f'Cannot set any frame attribute {attr}')
super().__setattr__(attr, value)
def __eq__(self, value):
"""Equality operator for frame.
This implements strict equality and requires that the frames are
equivalent and that the representation data are exactly equal.
"""
is_equiv = self.is_equivalent_frame(value)
if self._data is None and value._data is None:
# For Frame with no data, == compare is same as is_equivalent_frame()
return is_equiv
if not is_equiv:
raise TypeError(f'cannot compare: objects must have equivalent frames: '
f'{self.replicate_without_data()} vs. '
f'{value.replicate_without_data()}')
if ((value._data is None and self._data is not None)
or (self._data is None and value._data is not None)):
raise ValueError('cannot compare: one frame has data and the other '
'does not')
return self._data == value._data
def __ne__(self, value):
return np.logical_not(self == value)
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
from .angle_utilities import angular_separation
from .angles import Angle
self_unit_sph = self.represent_as(r.UnitSphericalRepresentation)
other_transformed = other.transform_to(self)
other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation)
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(self_unit_sph.lon, self_unit_sph.lat,
other_unit_sph.lon, other_unit_sph.lat)
return Angle(sep, unit=u.degree)
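# Illustrative sketch, not part of astropy: the other coordinate is
# transformed into this frame first, so the operands need not share a
# frame. The `_demo_*` helper name is hypothetical.
def _demo_separation():
    from astropy import units as u
    from astropy.coordinates import FK5, ICRS
    c1 = ICRS(ra=0 * u.deg, dec=0 * u.deg)
    c2 = FK5(ra=1 * u.deg, dec=0 * u.deg)
    return c1.separation(c2)  # Angle of about 1 deg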
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
from .distances import Distance
if issubclass(self.data.__class__, r.UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
# do this first just in case the conversion somehow creates a distance
other_in_self_system = other.transform_to(self)
if issubclass(other_in_self_system.data.__class__, r.UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
# drop the differentials to ensure they don't do anything odd in the
# subtraction
self_car = self.data.without_differentials().represent_as(r.CartesianRepresentation)
other_car = other_in_self_system.data.without_differentials().represent_as(r.CartesianRepresentation)
dist = (self_car - other_car).norm()
if dist.unit == u.one:
return dist
else:
return Distance(dist)
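# Illustrative sketch, not part of astropy: 3D separation requires both
# coordinates to carry distances. The `_demo_*` helper name is
# hypothetical.
def _demo_separation_3d():
    from astropy import units as u
    from astropy.coordinates import ICRS
    c1 = ICRS(ra=0 * u.deg, dec=0 * u.deg, distance=10 * u.pc)
    c2 = ICRS(ra=0 * u.deg, dec=0 * u.deg, distance=12 * u.pc)
    return c1.separation_3d(c2)  # Distance of 2 pc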
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('cartesian', in_frame_units=True)
@property
def cylindrical(self):
"""
Shorthand for a cylindrical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('cylindrical', in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('spherical', in_frame_units=True)
@property
def sphericalcoslat(self):
"""
Shorthand for a spherical representation of the positional data and a
`SphericalCosLatDifferential` for the velocity data in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('spherical', 'sphericalcoslat',
in_frame_units=True)
@property
def velocity(self):
"""
Shorthand for retrieving the Cartesian space-motion as a
`CartesianDifferential` object. This is equivalent to calling
``self.cartesian.differentials['s']``.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
return self.cartesian.differentials['s']
@property
def proper_motion(self):
"""
Shorthand for the two-dimensional proper motion as a
`~astropy.units.Quantity` object with angular velocity units. In the
returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude
dimension so that ``.proper_motion[0]`` is the longitudinal proper
motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper
motion already includes the cos(latitude) term.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
sph = self.represent_as('spherical', 'sphericalcoslat',
in_frame_units=True)
pm_lon = sph.differentials['s'].d_lon_coslat
pm_lat = sph.differentials['s'].d_lat
return np.stack((pm_lon.value,
pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit
@property
def radial_velocity(self):
"""
Shorthand for the radial or line-of-sight velocity as a
`~astropy.units.Quantity` object.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
sph = self.represent_as('spherical', in_frame_units=True)
return sph.differentials['s'].d_distance
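# Illustrative sketch, not part of astropy: the three velocity
# shorthands above on a fully specified coordinate. The `_demo_*` helper
# name is hypothetical.
def _demo_velocity_shorthands():
    from astropy import units as u
    from astropy.coordinates import ICRS
    c = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.kpc,
             pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=2 * u.mas / u.yr,
             radial_velocity=30 * u.km / u.s)
    return c.velocity, c.proper_motion, c.radial_velocity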
class GenericFrame(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
self.frame_attributes = {}
for name, default in frame_attrs.items():
self.frame_attributes[name] = Attribute(default)
setattr(self, '_' + name, default)
super().__init__(None)
def __getattr__(self, name):
if '_' + name in self.__dict__:
return getattr(self, '_' + name)
else:
raise AttributeError(f'no {name}')
def __setattr__(self, name, value):
if name in self.get_frame_attr_names():
raise AttributeError(f"can't set frame attribute '{name}'")
else:
super().__setattr__(name, value)
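# Illustrative sketch, not part of astropy: GenericFrame holds arbitrary
# frame attributes without data. The `_demo_*` helper name is
# hypothetical.
def _demo_generic_frame():
    from astropy.coordinates.baseframe import GenericFrame
    g = GenericFrame({'obstime': 'J2000'})
    assert g.obstime == 'J2000'  # readable, but not settable
    return g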
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
import warnings
from collections import namedtuple
import numpy as np
from . import angle_formats as form
from astropy import units as u
from astropy.utils import isiterable
__all__ = ['Angle', 'Latitude', 'Longitude']
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's'))
dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's'))
signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's'))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats.
The examples below illustrate common ways of initializing an `Angle`
object. First some imports::
>>> from astropy.coordinates import Angle
>>> from astropy import units as u
>>> import numpy as np
The angle values can now be provided::
>>> Angle('10.2345d')
<Angle 10.2345 deg>
>>> Angle(['10.2345d', '-20d'])
<Angle [ 10.2345, -20. ] deg>
>>> Angle('1:2:30.43 degrees')
<Angle 1.04178611 deg>
>>> Angle('1 2 0 hours')
<Angle 1.03333333 hourangle>
>>> Angle(np.arange(1, 8), unit=u.deg)
<Angle [1., 2., 3., 4., 5., 6., 7.] deg>
>>> Angle('1°2′3″')
<Angle 1.03416667 deg>
>>> Angle('1°2′3″N')
<Angle 1.03416667 deg>
>>> Angle('1d2m3.4s')
<Angle 1.03427778 deg>
>>> Angle('1d2m3.4sS')
<Angle -1.03427778 deg>
>>> Angle('-1h2m3s')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2m3sE')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2.5m')
<Angle -1.04166667 hourangle>
>>> Angle('-1h2.5mW')
<Angle 1.04166667 hourangle>
>>> Angle('-1:2.5', unit=u.deg)
<Angle -1.04166667 deg>
>>> Angle(10.2345 * u.deg)
<Angle 10.2345 deg>
>>> Angle(Angle(10.2345 * u.deg))
<Angle 10.2345 deg>
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=np.inexact, copy=True, **kwargs):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = form.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
if angle_unit == u.hourangle:
form._check_hour_range(angle[0])
form._check_minute_range(angle[1])
a = np.abs(angle[0]) + angle[1] / 60.
if len(angle) == 3:
form._check_second_range(angle[2])
a += angle[2] / 3600.
angle = np.copysign(a, angle[0])
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif (isiterable(angle) and
not (isinstance(angle, np.ndarray) and
angle.dtype.kind not in 'SUVO')):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy,
**kwargs)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return form.hms_to_hours(*angle)
elif unit == u.degree:
return form.dms_to_degrees(*angle)
else:
raise u.UnitsError(f"Cannot parse '{angle}' as unit '{unit}'")
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit is u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""
The angle's value in hours, as a named tuple with ``(h, m, s)``
members. (This is a read-only property.)
"""
return hms_tuple(*form.hours_to_hms(self.hourangle))
@property
def dms(self):
"""
The angle's value in degrees, as a named tuple with ``(d, m, s)``
members. (This is a read-only property.)
"""
return dms_tuple(*form.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""
The angle's value in degrees, as a named tuple with ``(sign, d, m, s)``
members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``. (This is a read-only property.)
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(np.sign(self.degree),
*form.degrees_to_dms(np.abs(self.degree)))
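# Illustrative sketch, not part of astropy: dms vs. signed_dms for a
# negative angle. The `_demo_*` helper name is hypothetical.
def _demo_angle_named_tuples():
    from astropy import units as u
    from astropy.coordinates import Angle
    a = Angle(-1.5 * u.deg)
    # dms carries the sign on every member; signed_dms factors it into a
    # separate `sign` member so d/m/s stay positive.
    return a.dms, a.signed_dms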
def to_string(self, unit=None, decimal=False, sep='fromunit',
precision=None, alwayssign=False, pad=False,
fields=3, format=None):
""" A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `True`, a decimal representation will be used, otherwise
the returned string will be in sexagesimal form.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
            ``sep='-:'`` would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string which is the
same as with ``format='latex'`` for |Angle| instances
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
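        Examples
        --------
        Illustrative usage (a sketch; the exact digits in the last place
        may differ, so these doctests are skipped)::

            >>> from astropy.coordinates import Angle
            >>> import astropy.units as u
            >>> a = Angle(10.2345, unit=u.deg)
            >>> a.to_string()  # doctest: +SKIP
            '10d14m04.2s'
            >>> a.to_string(sep=':', precision=1)  # doctest: +SKIP
            '10:14:04.2'
            >>> a.to_string(unit=u.hourangle, decimal=True, precision=4)  # doctest: +SKIP
            '0.6823'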
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
None: {
u.degree: 'dms',
u.hourangle: 'hms'},
'latex': {
u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'],
u.hourangle: [r'^{\mathrm{h}}', r'^{\mathrm{m}}', r'^{\mathrm{s}}']},
'unicode': {
u.degree: '°′″',
u.hourangle: 'ʰᵐˢ'}
}
# 'latex_inline' provides no functionality beyond what 'latex' offers,
# but it should be implemented to avoid ValueErrors in user code.
separators['latex_inline'] = separators['latex']
if sep == 'fromunit':
if format not in separators:
raise ValueError(f"Unknown format '{format}'")
seps = separators[format]
if unit in seps:
sep = seps[unit]
# Create an iterator so we can format each element of what
# might be an array.
if unit is u.degree:
if decimal:
values = self.degree
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{:g}'.format
else:
if sep == 'fromunit':
sep = 'dms'
values = self.degree
func = lambda x: form.degrees_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit is u.hourangle:
if decimal:
values = self.hour
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{:g}'.format
else:
if sep == 'fromunit':
sep = 'hms'
values = self.hour
func = lambda x: form.hours_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit.is_equivalent(u.radian):
if decimal:
values = self.to_value(unit)
if precision is not None:
func = ("{0:1." + str(precision) + "f}").format
else:
func = "{:g}".format
elif sep == 'fromunit':
values = self.to_value(unit)
unit_string = unit.to_string(format=format)
if format == 'latex' or format == 'latex_inline':
unit_string = unit_string[1:-1]
if precision is not None:
def plain_unit_format(val):
return ("{0:0." + str(precision) + "f}{1}").format(
val, unit_string)
func = plain_unit_format
else:
def plain_unit_format(val):
return f"{val:g}{unit_string}"
func = plain_unit_format
else:
raise ValueError(
f"'{unit.name}' can not be represented in sexagesimal notation")
else:
raise u.UnitsError(
"The unit value provided is not an angular unit.")
def do_format(val):
# Check if value is not nan to avoid ValueErrors when turning it into
            # a sexagesimal string.
if not np.isnan(val):
s = func(float(val))
if alwayssign and not s.startswith('-'):
s = '+' + s
if format == 'latex' or format == 'latex_inline':
s = f'${s}$'
return s
s = f"{val}"
return s
format_ufunc = np.vectorize(do_format, otypes=['U'])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
def _wrap_at(self, wrap_angle):
"""
Implementation that assumes ``angle`` is already validated
and that wrapping is inplace.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
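        # Worked example (illustrative): wrapping at 360 deg gives
        # wrap_angle_floor = 0 deg; an angle of 370 deg then has
        # wraps = (370 - 0) // 360 = 1 and becomes 370 - 1*360 = 10 deg.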
self_angle = self.view(np.ndarray)
# Do the wrapping, but only if any angles need to be wrapped
#
# This invalid catch block is needed both for the floor division
        # and for the comparisons later on (the latter is not really needed
        # any more for numpy >= 1.19 (NUMPY_LT_1_19), but the former is).
with np.errstate(invalid='ignore'):
wraps = (self_angle - wrap_angle_floor) // a360
np.nan_to_num(wraps, copy=False)
if np.any(wraps != 0):
self_angle -= wraps*a360
# Rounding errors can cause problems.
self_angle[self_angle >= wrap_angle] -= a360
self_angle[self_angle < wrap_angle_floor] += a360
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `~astropy.coordinates.Angle`
object is wrapped in place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : angle-like
Specifies a single value for the wrap angle. This can be any
object that can initialize an `~astropy.coordinates.Angle` object,
e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `~astropy.coordinates.Angle`
Returns
-------
out : Angle or None
If ``inplace is False`` (default), return new
`~astropy.coordinates.Angle` object with angles wrapped accordingly.
Otherwise wrap in place and return `None`.
"""
wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle
if not inplace:
self = self.copy()
self._wrap_at(wrap_angle)
return None if inplace else self
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : angle-like or None
Specifies lower bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : angle-like or None
Specifies upper bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
def formatter(x):
return x.to_string(format=format)
return np.array2string(self, formatter={'all': formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format='latex')
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
or ``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
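    Examples
    --------
    A minimal sketch of the bounds checking (illustrative; skipped by the
    doctest runner)::

        >>> from astropy.coordinates import Latitude
        >>> import astropy.units as u
        >>> Latitude(45 * u.deg)  # doctest: +SKIP
        <Latitude 45. deg>
        >>> Latitude(91 * u.deg)  # doctest: +SKIP
        Traceback (most recent call last):
        ...
        ValueError: Latitude angle(s) must be within -90 deg <= angle <= 90 deg, got 91.0 deg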
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
        If not given, the check is done on the object itself."""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
lower = u.degree.to(angles.unit, -90.0)
upper = u.degree.to(angles.unit, 90.0)
# This invalid catch block can be removed when the minimum numpy
# version is >= 1.19 (NUMPY_LT_1_19)
with np.errstate(invalid='ignore'):
invalid_angles = (np.any(angles.value < lower) or
np.any(angles.value > upper))
if invalid_angles:
raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
'got {}'.format(angles.to(u.degree)))
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
if value is not np.ma.masked:
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted
following the rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like ['angle'], optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : angle-like or None, optional
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
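    Examples
    --------
    A minimal sketch of the wrapping behaviour (illustrative; skipped by
    the doctest runner since the exact array formatting may vary)::

        >>> from astropy.coordinates import Longitude
        >>> import astropy.units as u
        >>> lon = Longitude([-20., 370.] * u.deg)
        >>> lon.degree  # doctest: +SKIP
        array([340.,  10.])
        >>> lon.wrap_angle = 180 * u.deg  # re-wraps the values in place
        >>> lon.degree  # doctest: +SKIP
        array([-20.,  10.])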
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError("A Longitude angle cannot be created from "
"a Latitude angle.")
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle)
self.wrap_angle = wrap_angle # angle-like b/c property setter
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_at(self.wrap_angle)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value, copy=False)
self._wrap_at(self.wrap_angle)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, '_wrap_angle',
self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
# Project
from astropy import units as u
from astropy.utils import ShapedLikeNDArray
__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
'EarthLocationAttribute', 'CoordinateAttribute',
'CartesianRepresentationAttribute',
'DifferentialAttribute']
class Attribute:
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
name = '<unbound>'
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def __set_name__(self, owner, name):
self.name = name
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value : object
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, '_' + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
instance_shape = getattr(instance, 'shape', None) # None if instance (frame) has no data!
if instance_shape is not None and (getattr(out, 'shape', ()) and
out.shape != instance_shape):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(np.broadcast_to, shape=instance_shape,
subok=True)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
"attribute {} should be scalar or have shape {}, "
"but is has shape {} and could not be broadcast."
.format(self.name, instance_shape, out.shape))
converted = True
if converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
the Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
f'Invalid time input {self.name}={value!r}.') from err
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit-like or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute='', unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out : object
The correctly-typed object.
converted : boolean
A boolean which indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (isinstance(value, list) and len(value) == 3 and
all(v == 0 for v in value) and self.unit is not None):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, 'xyz', value)
if not hasattr(value, 'unit'):
raise TypeError('tried to set a {} with something that does '
'not have a unit.'
.format(self.__class__.__name__))
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Can be `None`, which should be used for special cases in associated
frame transformations like "this quantity should be ignored" or similar.
Parameters
----------
default : number or `~astropy.units.Quantity` or None, optional
Default value for the attribute if the user does not supply one. If a
Quantity, it must be consistent with ``unit``, or if a value, ``unit``
cannot be None.
secondary_attribute : str, optional
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit-like or None, optional
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None, optional
If given, specifies the shape the attribute must be
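    Examples
    --------
    A hypothetical frame with a velocity attribute (a sketch only;
    ``MyFrame`` is not a real astropy class)::

        class MyFrame(BaseCoordinateFrame):
            # values assigned to this attribute are coerced to km/s
            relative_velocity = QuantityAttribute(default=0 * u.km / u.s,
                                                  unit=u.km / u.s)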
"""
def __init__(self, default=None, secondary_attribute='', unit=None,
shape=None):
if default is None and unit is None:
raise ValueError('Either a default quantity value must be '
'provided, or a unit must be provided to define a '
'QuantityAttribute.')
if default is not None and unit is None:
unit = default.unit
self.unit = unit
self.shape = shape
default = self.convert_input(default)[0]
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if (not hasattr(value, 'unit') and self.unit != u.dimensionless_unscaled
and np.any(value != 0)):
raise TypeError('Tried to set a QuantityAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
if value.shape == () and oldvalue == 0:
# Allow a single 0 to fill whatever shape is needed.
value = np.broadcast_to(value, self.shape, subok=True)
else:
raise ValueError(
f'The provided value has shape "{value.shape}", but '
f'should have shape "{self.shape}"')
converted = oldvalue is not value
return value, converted
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Checks that the input is an `~astropy.coordinates.EarthLocation`, or
        something that can be transformed to the ITRS frame (or the special
        value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{}" was passed into an '
'EarthLocationAttribute, but it does not have '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS())
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
`~astropy.coordinates.SkyCoord` or a low-level frame instance. If a
low-level frame instance is provided, it will always be upgraded to be a
`~astropy.coordinates.SkyCoord` to ensure consistent transformation
behavior. The coordinate object will always be returned as a low-level
frame instance when accessed.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, frame, default=None, secondary_attribute=''):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
        Checks that the input is a SkyCoord or coordinate frame of the
        expected type (or the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.coordinates import SkyCoord
if value is None:
return None, False
elif isinstance(value, self._frame):
return value, False
else:
value = SkyCoord(value) # always make the value a SkyCoord
transformedobj = value.transform_to(self._frame)
return transformedobj.frame, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None,
secondary_attribute=''):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if not isinstance(value, self.allowed_classes):
if len(self.allowed_classes) == 1:
value = self.allowed_classes[0](value)
else:
raise TypeError('Tried to set a DifferentialAttribute with '
'an unsupported Differential type {}. Allowed '
'classes are: {}'
.format(value.__class__,
self.allowed_classes))
return value, True
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import CartesianRepresentation, BaseDifferential
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Standard library
import re
import textwrap
import warnings
from datetime import datetime
from urllib.request import urlopen, Request
# Third-party
from astropy import time as atime
from astropy.utils.console import color_print, _color_text
from . import get_sun
__all__ = []
class HumanError(ValueError):
pass
class CelestialError(ValueError):
pass
def get_sign(dt):
    """
    Get the zodiac sign for the given ``datetime``.
    """
    month, day = int(dt.month), int(dt.day)
    if (month == 12 and day >= 22) or (month == 1 and day <= 19):
        zodiac_sign = "capricorn"
    elif (month == 1 and day >= 20) or (month == 2 and day <= 17):
        zodiac_sign = "aquarius"
    elif (month == 2 and day >= 18) or (month == 3 and day <= 19):
        zodiac_sign = "pisces"
    elif (month == 3 and day >= 20) or (month == 4 and day <= 19):
        zodiac_sign = "aries"
    elif (month == 4 and day >= 20) or (month == 5 and day <= 20):
        zodiac_sign = "taurus"
    elif (month == 5 and day >= 21) or (month == 6 and day <= 20):
        zodiac_sign = "gemini"
    elif (month == 6 and day >= 21) or (month == 7 and day <= 22):
        zodiac_sign = "cancer"
    elif (month == 7 and day >= 23) or (month == 8 and day <= 22):
        zodiac_sign = "leo"
    elif (month == 8 and day >= 23) or (month == 9 and day <= 22):
        zodiac_sign = "virgo"
    elif (month == 9 and day >= 23) or (month == 10 and day <= 22):
        zodiac_sign = "libra"
    elif (month == 10 and day >= 23) or (month == 11 and day <= 21):
        zodiac_sign = "scorpio"
    elif (month == 11 and day >= 22) or (month == 12 and day <= 21):
        zodiac_sign = "sagittarius"
    return zodiac_sign
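# Worked example (illustrative): get_sign(datetime(2000, 3, 21)) returns
# 'aries', since March 21 falls in the March 20 - April 19 window above.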
_VALID_SIGNS = ["capricorn", "aquarius", "pisces", "aries", "taurus", "gemini",
"cancer", "leo", "virgo", "libra", "scorpio", "sagittarius"]
# Some of the constellation names map to different astrological "sign names".
# Astrologers really need to talk to the IAU...
_CONST_TO_SIGNS = {'capricornus': 'capricorn', 'scorpius': 'scorpio'}
_ZODIAC = ((1900, "rat"), (1901, "ox"), (1902, "tiger"),
(1903, "rabbit"), (1904, "dragon"), (1905, "snake"),
(1906, "horse"), (1907, "goat"), (1908, "monkey"),
(1909, "rooster"), (1910, "dog"), (1911, "pig"))
# https://stackoverflow.com/questions/12791871/chinese-zodiac-python-program
def _get_zodiac(yr):
return _ZODIAC[(yr - _ZODIAC[0][0]) % 12][1]
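# Worked example (illustrative): _get_zodiac(1995) -> 'pig', because
# (1995 - 1900) % 12 == 11 and _ZODIAC[11] is (1911, "pig").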
def horoscope(birthday, corrected=True, chinese=False):
"""
Enter your birthday as an `astropy.time.Time` object and
receive a mystical horoscope about things to come.
Parameters
----------
birthday : `astropy.time.Time` or str
Your birthday as a `datetime.datetime` or `astropy.time.Time` object
or "YYYY-MM-DD"string.
corrected : bool
Whether to account for the precession of the Earth instead of using the
ancient Greek dates for the signs. After all, you do want your *real*
horoscope, not a cheap inaccurate approximation, right?
chinese : bool
        Chinese annual zodiac wisdom instead of the Western one.
Returns
-------
Infinite wisdom, condensed into astrologically precise prose.
Notes
-----
This function was implemented on April 1. Take note of that date.
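    Examples
    --------
    Usage sketch (requires network access and the ``bs4`` package; the
    output, of course, varies, so the doctest is skipped)::

        >>> from astropy.time import Time
        >>> horoscope(Time('2013-04-01'))  # doctest: +SKIP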
"""
from bs4 import BeautifulSoup
today = datetime.now()
err_msg = "Invalid response from celestial gods (failed to load horoscope)."
headers = {'User-Agent': 'foo/bar'}
special_words = {
'([sS]tar[s^ ]*)': 'yellow',
'([yY]ou[^ ]*)': 'magenta',
'([pP]lay[^ ]*)': 'blue',
'([hH]eart)': 'red',
'([fF]ate)': 'lightgreen',
}
if isinstance(birthday, str):
birthday = datetime.strptime(birthday, '%Y-%m-%d')
if chinese:
# TODO: Make this more accurate by using the actual date, not just year
# Might need third-party tool like https://pypi.org/project/lunardate
zodiac_sign = _get_zodiac(birthday.year)
url = ('https://www.horoscope.com/us/horoscopes/yearly/'
'{}-chinese-horoscope-{}.aspx'.format(today.year, zodiac_sign))
summ_title_sfx = f'in {today.year}'
try:
res = Request(url, headers=headers)
with urlopen(res) as f:
try:
doc = BeautifulSoup(f, 'html.parser')
# TODO: Also include Love, Family & Friends, Work, Money, More?
item = doc.find(id='overview')
desc = item.getText()
except Exception:
raise CelestialError(err_msg)
except Exception:
raise CelestialError(err_msg)
else:
birthday = atime.Time(birthday)
if corrected:
with warnings.catch_warnings():
warnings.simplefilter('ignore') # Ignore ErfaWarning
zodiac_sign = get_sun(birthday).get_constellation().lower()
zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign)
if zodiac_sign not in _VALID_SIGNS:
raise HumanError('On your birthday the sun was in {}, which is not '
'a sign of the zodiac. You must not exist. Or '
'maybe you can settle for '
'corrected=False.'.format(zodiac_sign.title()))
else:
zodiac_sign = get_sign(birthday.to_datetime())
url = f"http://www.astrology.com/us/horoscope/daily-overview.aspx?sign={zodiac_sign}"
summ_title_sfx = f"on {today.strftime('%Y-%m-%d')}"
res = Request(url, headers=headers)
with urlopen(res) as f:
try:
doc = BeautifulSoup(f, 'html.parser')
item = doc.find('div', {'id': 'content'})
desc = item.getText()
except Exception:
raise CelestialError(err_msg)
print("*"*79)
color_print(f"Horoscope for {zodiac_sign.capitalize()} {summ_title_sfx}:",
'green')
print("*"*79)
for block in textwrap.wrap(desc, 79):
split_block = block.split()
for i, word in enumerate(split_block):
for re_word in special_words.keys():
match = re.search(re_word, word)
if match is None:
continue
split_block[i] = _color_text(match.groups()[0], special_words[re_word])
print(" ".join(split_block))
def inject_horoscope():
import astropy
astropy._yourfuture = horoscope
inject_horoscope()
"""Implements the wrapper for the Astropy test runner.
This is for backward-compatibility for other downstream packages and can be removed
once astropy-helpers has reached end-of-life.
"""
import os
import stat
import shutil
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from setuptools import Command
from astropy.logger import log
@contextmanager
def _suppress_stdout():
'''
A context manager to temporarily disable stdout.
Used later when installing a temporary copy of astropy to avoid a
very verbose output.
'''
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
class FixRemoteDataOption(type):
"""
This metaclass is used to catch cases where the user is running the tests
with --remote-data. We've now changed the --remote-data option so that it
takes arguments, but we still want --remote-data to work as before and to
enable all remote tests. With this metaclass, we can modify sys.argv
    before setuptools tries to parse the command-line options.
"""
def __init__(cls, name, bases, dct):
try:
idx = sys.argv.index('--remote-data')
except ValueError:
pass
else:
sys.argv[idx] = '--remote-data=any'
try:
idx = sys.argv.index('-R')
except ValueError:
pass
else:
sys.argv[idx] = '-R=any'
return super().__init__(name, bases, dct)
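# For example (illustrative): a command line of ``setup.py test --remote-data``
# is rewritten by the metaclass above to ``setup.py test --remote-data=any``
# before setuptools parses the options.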
class AstropyTest(Command, metaclass=FixRemoteDataOption):
description = 'Run the tests for this package'
user_options = [
('package=', 'P',
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"Accepts comma separated string to specify multiple packages. "
"If nothing is specified, all default tests are run."),
('test-path=', 't',
'Specify a test location by path. If a relative path to a .py file, '
'it is relative to the built package, so e.g., a leading "astropy/" '
'is necessary. If a relative path to a .rst file, it is relative to '
'the directory *below* the --docs-path directory, so a leading '
'"docs/" is usually necessary. May also be an absolute path.'),
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
('plugins=', 'p',
'Plugins to enable when running pytest.'),
('pastebin=', 'b',
"Enable pytest pastebin output. Either 'all' or 'failed'."),
('args=', 'a',
'Additional arguments to be passed to pytest.'),
('remote-data=', 'R', 'Run tests that download remote data. Should be '
'one of none/astropy/any (defaults to none).'),
('pep8', '8',
'Enable PEP8 checking and disable regular tests. '
'Requires the pytest-pep8 plugin.'),
('pdb', 'd',
'Start the interactive Python debugger on errors.'),
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('open-files', 'o', 'Fail if any tests leave files open. Requires the '
'psutil package.'),
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If "auto", all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
('docs-path=', None,
'The path to the documentation .rst files. If not provided, and '
'the current directory contains a directory called "docs", that '
'will be used.'),
('skip-docs', None,
"Don't test the documentation .rst files."),
('repeat=', None,
'How many times to repeat each test (can be used to check for '
'sporadic failures).'),
('temp-root=', None,
'The root directory in which to create the temporary testing files. '
'If unspecified the system default is used (e.g. /tmp) as explained '
'in the documentation for tempfile.mkstemp.'),
('verbose-install', None,
'Turn on terminal output from the installation of astropy in a '
'temporary folder.'),
('readonly', None,
'Make the temporary installation being tested read-only.')
]
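    # A typical invocation (illustrative; any subpackage name works)::
    #
    #     python setup.py test -P wcs -j 4
    #
    # runs the ``wcs`` subpackage tests on four cores inside a temporary
    # installation of the package.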
package_name = ''
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = 'none'
self.pep8 = False
self.pdb = False
self.coverage = False
self.open_files = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
self.repeat = None
self.temp_root = None
self.verbose_install = False
self.readonly = False
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.test('
'package={1.package!r}, '
'test_path={1.test_path!r}, '
'args={1.args!r}, '
'plugins={1.plugins!r}, '
'verbose={1.verbose_results!r}, '
'pastebin={1.pastebin!r}, '
'remote_data={1.remote_data!r}, '
'pep8={1.pep8!r}, '
'pdb={1.pdb!r}, '
'open_files={1.open_files!r}, '
'parallel={1.parallel!r}, '
'docs_path={1.docs_path!r}, '
'skip_docs={1.skip_docs!r}, '
'add_local_eggs_to_path=True, ' # see _build_temp_install below
'repeat={1.repeat!r})); '
'{cmd_post}'
'sys.exit(result)')
return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
def run(self):
"""
Run the tests!
"""
# Install the runtime dependencies.
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
# Ensure there is a doc path
if self.docs_path is None:
cfg_docs_dir = self.distribution.get_option_dict('build_docs').get('source_dir', None)
# Some affiliated packages use this.
# See astropy/package-template#157
if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]):
self.docs_path = os.path.abspath(cfg_docs_dir[1])
# fall back on a default path of "docs"
elif os.path.exists('docs'): # pragma: no cover
self.docs_path = os.path.abspath('docs')
# Build a testing install of the package
self._build_temp_install()
# Install the test dependencies
# NOTE: we do this here after _build_temp_install because there is
        # a weird bug which occurs if psutil is installed in this way before
        # astropy is built: Cython can have a segmentation fault. Strange, eh?
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# Copy any additional dependencies that may have been installed via
# tests_requires or install_requires. We then pass the
# add_local_eggs_to_path=True option to package.test() to make sure the
# eggs get included in the path.
if os.path.exists('.eggs'):
shutil.copytree('.eggs', os.path.join(self.testing_path, '.eggs'))
# This option exists so that we can make sure that the tests don't
# write to an installed location.
if self.readonly:
log.info('changing permissions of temporary installation to read-only')
self._change_permissions_testing_path(writable=False)
# Run everything in a try: finally: so that the tmp dir gets deleted.
try:
            # Construct this module's testing command
cmd = self.generate_testing_command()
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
testproc = subprocess.Popen(
[sys.executable, '-c', cmd],
cwd=self.testing_path, close_fds=False)
retcode = testproc.wait()
except KeyboardInterrupt:
import signal
# If a keyboard interrupt is handled, pass it to the test
# subprocess to prompt pytest to initiate its teardown
testproc.send_signal(signal.SIGINT)
retcode = testproc.wait()
finally:
# Remove temporary directory
if self.readonly:
self._change_permissions_testing_path(writable=True)
shutil.rmtree(self.tmp_dir)
raise SystemExit(retcode)
def _build_temp_install(self):
"""
        Install the package to a temporary directory for the purposes of
        testing. This allows us to test the install command, including the
        entry points, and also avoids creating pyc and __pycache__ directories
        inside the build directory.
"""
# On OSX the default path for temp files is under /var, but in most
# cases on OSX /var is actually a symlink to /private/var; ensure we
# dereference that link, because pytest is very sensitive to relative
# paths...
tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-',
dir=self.temp_root)
self.tmp_dir = os.path.realpath(tmp_dir)
log.info(f'installing to temporary directory: {self.tmp_dir}')
# We now install the package to the temporary directory. We do this
# rather than build and copy because this will ensure that e.g. entry
# points work.
self.reinitialize_command('install')
install_cmd = self.distribution.get_command_obj('install')
install_cmd.prefix = self.tmp_dir
if self.verbose_install:
self.run_command('install')
else:
with _suppress_stdout():
self.run_command('install')
# We now get the path to the site-packages directory that was created
# inside self.tmp_dir
install_cmd = self.get_finalized_command('install')
self.testing_path = install_cmd.install_lib
# Ideally, docs_path is set properly in run(), but if it is still
# not set here, do not pretend it is, otherwise bad things happen.
# See astropy/package-template#157
if self.docs_path is not None:
new_docs_path = os.path.join(self.testing_path,
os.path.basename(self.docs_path))
shutil.copytree(self.docs_path, new_docs_path)
self.docs_path = new_docs_path
shutil.copy('setup.cfg', self.testing_path)
def _change_permissions_testing_path(self, writable=False):
if writable:
basic_flags = stat.S_IRUSR | stat.S_IWUSR
else:
basic_flags = stat.S_IRUSR
for root, dirs, files in os.walk(self.testing_path):
for dirname in dirs:
os.chmod(os.path.join(root, dirname), basic_flags | stat.S_IXUSR)
for filename in files:
os.chmod(os.path.join(root, filename), basic_flags)
def _generate_coverage_commands(self):
"""
This method creates the post and pre commands if coverage is to be
generated
"""
if self.parallel != 0:
raise ValueError(
"--coverage can not be used with --parallel")
try:
import coverage # pylint: disable=W0611
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is "
"installed.")
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
self.testing_path, self.package_name.replace('.', '/'),
'tests', 'coveragerc')
with open(coveragerc) as fd:
coveragerc_content = fd.read()
coveragerc_content = coveragerc_content.replace(
"{packagename}", self.package_name.replace('.', '/'))
tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc')
with open(tmp_coveragerc, 'wb') as tmp:
tmp.write(coveragerc_content.encode('utf-8'))
cmd_pre = (
'import coverage; '
'cov = coverage.coverage(data_file=r"{}", config_file=r"{}"); '
'cov.start();'.format(
os.path.abspath(".coverage"), os.path.abspath(tmp_coveragerc)))
cmd_post = (
'cov.stop(); '
'from astropy.tests.helper import _save_coverage; '
'_save_coverage(cov, result, r"{}", r"{}");'.format(
os.path.abspath('.'), os.path.abspath(self.testing_path)))
return cmd_pre, cmd_post
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest`_ testing framework.
"""
import os
import sys
import pickle
import warnings
import functools
import pytest
from astropy.units import allclose as quantity_allclose # noqa: F401
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
# For backward-compatibility with affiliated packages
from .runner import TestRunner # pylint: disable=W0611 # noqa
__all__ = ['assert_follows_unicode_guidelines',
'assert_quantity_allclose', 'check_pickling_recovery',
'pickle_protocol', 'generic_recursive_equality_test']
def _save_coverage(cov, result, rootdir, testing_path):
"""
This method is called after the tests have been run in coverage mode
    to clean up and then save the coverage data and report.
"""
from astropy.utils.console import color_print
if result != 0:
return
# The coverage report includes the full path to the temporary
# directory, so we replace all the paths with the true source
# path. Note that this will not work properly for packages that still
# rely on 2to3.
try:
# Coverage 4.0: _harvest_data has been renamed to get_data, the
# lines dict is private
cov.get_data()
except AttributeError:
# Coverage < 4.0
cov._harvest_data()
lines = cov.data.lines
else:
lines = cov.data._lines
for key in list(lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key),
os.path.realpath(testing_path))
new_path = os.path.abspath(
os.path.join(rootdir, new_path))
lines[new_path] = lines.pop(key)
color_print('Saving coverage data in .coverage...', 'green')
cov.save()
color_print('Saving HTML coverage report in htmlcov...', 'green')
cov.html_report(directory=os.path.join(rootdir, 'htmlcov'))
@deprecated('5.1', alternative='pytest.raises')
class raises:
"""
A decorator to mark that a test should raise a given exception.
Use as follows::
@raises(ZeroDivisionError)
def test_foo():
x = 1/0
    This can also be used as a context manager, in which case it is just
    an alias for the ``pytest.raises`` context manager (because the
    two have the same name, this helps avoid confusion by being
    flexible).
.. note:: Usage of ``pytest.raises`` is preferred.
"""
# pep-8 naming exception -- this is a decorator class
def __init__(self, exc):
self._exc = exc
self._ctx = None
def __call__(self, func):
@functools.wraps(func)
def run_raises_test(*args, **kwargs):
pytest.raises(self._exc, func, *args, **kwargs)
return run_raises_test
def __enter__(self):
self._ctx = pytest.raises(self._exc)
return self._ctx.__enter__()
def __exit__(self, *exc_info):
return self._ctx.__exit__(*exc_info)
# TODO: Remove these when deprecation period of things deprecated in PR 12633 are removed.
_deprecations_as_exceptions = False
_include_astropy_deprecations = True
_modules_to_ignore_on_import = {
r'compiler', # A deprecated stdlib module used by pytest
r'scipy',
r'pygments',
r'ipykernel',
r'IPython', # deprecation warnings for async and await
r'setuptools'}
_warnings_to_ignore_entire_module = set()
_warnings_to_ignore_by_pyver = {
None: { # Python version agnostic
# https://github.com/astropy/astropy/pull/7372
(r"Importing from numpy\.testing\.decorators is deprecated, "
r"import from numpy\.testing instead\.", DeprecationWarning),
# inspect raises this slightly different warning on Python 3.7.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
(r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)",
DeprecationWarning),
# https://github.com/astropy/pytest-doctestplus/issues/29
(r"split\(\) requires a non-empty pattern match", FutureWarning),
# Package resolution warning that we can do nothing about
(r"can't resolve package from __spec__ or __package__, "
r"falling back on __name__ and __path__", ImportWarning)},
(3, 7): {
# Deprecation warning for collections.abc, fixed in Astropy but still
# used in lxml, and maybe others
(r"Using or importing the ABCs from 'collections'",
DeprecationWarning)}
}
@deprecated('5.1', alternative='https://docs.pytest.org/en/stable/warnings.html')
def enable_deprecations_as_exceptions(include_astropy_deprecations=True,
modules_to_ignore_on_import=[],
warnings_to_ignore_entire_module=[],
warnings_to_ignore_by_pyver={}):
"""
Turn on the feature that turns deprecations into exceptions.
Parameters
----------
include_astropy_deprecations : bool
If set to `True`, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also turned into exceptions.
modules_to_ignore_on_import : list of str
List of additional modules that generate deprecation warnings
on import, which are to be ignored. By default, these are already
        included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``,
        ``IPython``, and ``setuptools``.
warnings_to_ignore_entire_module : list of str
List of modules with deprecation warnings to ignore completely,
not just during import. If ``include_astropy_deprecations=True``
is given, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also ignored for the modules.
warnings_to_ignore_by_pyver : dict
Dictionary mapping tuple of ``(major, minor)`` Python version to
a list of ``(warning_message, warning_class)`` to ignore.
Python version-agnostic warnings should be mapped to `None` key.
This is in addition of those already ignored by default
(see ``_warnings_to_ignore_by_pyver`` values).
"""
global _deprecations_as_exceptions
_deprecations_as_exceptions = True
global _include_astropy_deprecations
_include_astropy_deprecations = include_astropy_deprecations
global _modules_to_ignore_on_import
_modules_to_ignore_on_import.update(modules_to_ignore_on_import)
global _warnings_to_ignore_entire_module
_warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module)
global _warnings_to_ignore_by_pyver
for key, val in warnings_to_ignore_by_pyver.items():
if key in _warnings_to_ignore_by_pyver:
_warnings_to_ignore_by_pyver[key].update(val)
else:
_warnings_to_ignore_by_pyver[key] = set(val)
@deprecated('5.1', alternative='https://docs.pytest.org/en/stable/warnings.html')
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state. The modules may change during
# this iteration thus we copy the original state to a list to iterate
# on. See https://github.com/astropy/astropy/pull/5513.
for module in list(sys.modules.values()):
try:
del module.__warningregistry__
except Exception:
pass
if not _deprecations_as_exceptions:
return
warnings.resetwarnings()
# Hide the next couple of DeprecationWarnings
warnings.simplefilter('ignore', DeprecationWarning)
# Here's the wrinkle: a couple of our third-party dependencies
# (pytest and scipy) are still using deprecated features
# themselves, and we'd like to ignore those. Fortunately, those
# show up only at import time, so if we import those things *now*,
# before we turn the warnings into exceptions, we're golden.
for m in _modules_to_ignore_on_import:
try:
__import__(m)
except ImportError:
pass
# Now, start over again with the warning filters
warnings.resetwarnings()
# Now, turn these warnings into exceptions
_all_warns = [DeprecationWarning, FutureWarning, ImportWarning]
# Only turn astropy deprecation warnings into exceptions if requested
if _include_astropy_deprecations:
_all_warns += [AstropyDeprecationWarning,
AstropyPendingDeprecationWarning]
for w in _all_warns:
warnings.filterwarnings("error", ".*", w)
# This ignores all specified warnings from given module(s),
# not just on import, for use of Astropy affiliated packages.
for m in _warnings_to_ignore_entire_module:
for w in _all_warns:
warnings.filterwarnings('ignore', category=w, module=m)
# This ignores only specified warnings by Python version, if applicable.
for v in _warnings_to_ignore_by_pyver:
if v is None or sys.version_info[:2] == v:
for s in _warnings_to_ignore_by_pyver[v]:
warnings.filterwarnings("ignore", s[0], s[1])
@deprecated('5.1', alternative='pytest.warns')
class catch_warnings(warnings.catch_warnings):
"""
A high-powered version of warnings.catch_warnings to use for testing
and to make sure that there is no dependence on the order in which
the tests are run.
This completely blitzes any memory of any warnings that have
appeared before so that all warnings will be caught and displayed.
``*args`` is a set of warning classes to collect. If no arguments are
provided, all warnings are collected.
Use as follows::
with catch_warnings(MyCustomWarning) as w:
do.something.bad()
assert len(w) > 0
.. note:: Usage of :ref:`pytest.warns <pytest:warns>` is preferred.
"""
def __init__(self, *classes):
super().__init__(record=True)
self.classes = classes
def __enter__(self):
warning_list = super().__enter__()
treat_deprecations_as_exceptions()
if len(self.classes) == 0:
warnings.simplefilter('always')
else:
warnings.simplefilter('ignore')
for cls in self.classes:
warnings.simplefilter('always', cls)
return warning_list
def __exit__(self, type, value, traceback):
treat_deprecations_as_exceptions()
@deprecated('5.1', alternative='pytest.mark.filterwarnings')
class ignore_warnings(catch_warnings):
"""
This can be used either as a context manager or function decorator to
ignore all warnings that occur within a function or block of code.
An optional category option can be supplied to only ignore warnings of a
certain category or categories (if a list is provided).
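    Use as follows (illustrative; ``do.something.noisy`` is a placeholder)::

        with ignore_warnings(DeprecationWarning):
            do.something.noisy()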
"""
def __init__(self, category=None):
super().__init__()
if isinstance(category, type) and issubclass(category, Warning):
self.category = [category]
else:
self.category = category
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Originally this just reused self, but that doesn't work if the
# function is called more than once so we need to make a new
# context manager instance for each call
with self.__class__(category=self.category):
return func(*args, **kwargs)
return wrapper
def __enter__(self):
retval = super().__enter__()
if self.category is not None:
for category in self.category:
warnings.simplefilter('ignore', category)
else:
warnings.simplefilter('ignore')
return retval
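# Illustrative usage sketch (``do_deprecated_thing`` is a hypothetical
# function): ``ignore_warnings`` works both as a decorator and as a context
# manager:
#
#     @ignore_warnings(category=DeprecationWarning)
#     def test_legacy_api():
#         do_deprecated_thing()  # DeprecationWarnings are suppressed
#
#     with ignore_warnings():
#         do_deprecated_thing()  # all warnings are suppressed in this block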
def assert_follows_unicode_guidelines(x, roundtrip=None):
"""
Test that an object follows our Unicode policy. See
"Unicode guidelines" in the coding guidelines.
Parameters
----------
x : object
The instance to test
roundtrip : module, optional
When provided, this namespace will be used to evaluate
``repr(x)`` and ensure that it roundtrips. It will also
        ensure that ``__bytes__(x)`` roundtrips.
If not provided, no roundtrip testing will be performed.
"""
from astropy import conf
with conf.set_temp('unicode_output', False):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
unicode_x.encode('ascii')
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
with conf.set_temp('unicode_output', True):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
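# Illustrative sketch (toy class, not from astropy): an object satisfies the
# checks above if str()/bytes()/repr() produce ASCII-clean text and, when a
# ``roundtrip`` namespace is given, the object can be rebuilt from each:
#
#     class Angle:
#         def __init__(self, text):
#             self.text = text if isinstance(text, str) else text.decode('ascii')
#         def __str__(self):
#             return self.text
#         def __bytes__(self):
#             return self.text.encode('ascii')
#         def __repr__(self):
#             return f'Angle({self.text!r})'
#         def __eq__(self, other):
#             return self.text == other.text
#
#     assert_follows_unicode_guidelines(Angle('10d'), roundtrip={'Angle': Angle})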
@pytest.fixture(params=[0, 1, -1])
def pickle_protocol(request):
"""
    Fixture to run all the tests for pickle protocols 0, 1, and -1 (the
    most advanced).
(Originally from astropy.table.tests.test_pickle)
"""
return request.param
def generic_recursive_equality_test(a, b, class_history):
"""
Check if the attributes of a and b are equal. Then,
check if the attributes of the attributes are equal.
"""
dict_a = a.__getstate__() if hasattr(a, '__getstate__') else a.__dict__
dict_b = b.__dict__
for key in dict_a:
        assert key in dict_b, f"Did not pickle {key}"
if dict_a[key].__class__.__eq__ is not object.__eq__:
# Only compare if the class defines a proper equality test.
# E.g., info does not define __eq__, and hence defers to
# object.__eq__, which is equivalent to checking that two
# instances are the same. This will generally not be true
# after pickling.
eq = (dict_a[key] == dict_b[key])
if '__iter__' in dir(eq):
eq = (False not in eq)
assert eq, f"Value of {key} changed by pickling"
if hasattr(dict_a[key], '__dict__'):
if dict_a[key].__class__ in class_history:
# attempt to prevent infinite recursion
pass
else:
new_class_history = [dict_a[key].__class__]
new_class_history.extend(class_history)
generic_recursive_equality_test(dict_a[key],
dict_b[key],
new_class_history)
def check_pickling_recovery(original, protocol):
"""
Try to pickle an object. If successful, make sure
the object's attributes survived pickling and unpickling.
"""
f = pickle.dumps(original, protocol=protocol)
unpickled = pickle.loads(f)
class_history = [original.__class__]
generic_recursive_equality_test(original, unpickled,
class_history)
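# Illustrative usage sketch (``SomeContainer`` is a hypothetical class):
# paired with the ``pickle_protocol`` fixture above, a test exercises every
# supported protocol:
#
#     def test_container_pickling(pickle_protocol):
#         check_pickling_recovery(SomeContainer(data=[1, 2, 3]), pickle_protocol)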
def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None,
**kwargs):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`.
"""
import numpy as np
from astropy.units.quantity import _unquantify_allclose_arguments
np.testing.assert_allclose(*_unquantify_allclose_arguments(
actual, desired, rtol, atol), **kwargs)
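# Illustrative usage sketch (values are made up for the example):
#
#     import astropy.units as u
#     assert_quantity_allclose(1000. * u.m, 1. * u.km)               # passes
#     assert_quantity_allclose(1. * u.m, 1.1 * u.m, atol=0.2 * u.m)  # passes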
|
c6060beb2def2c61474e488ded6ae8e5e8674ad994aedc06f425f9d077661468 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import types
import warnings
import numpy as np
from astropy.units.core import Unit, UnitsError
from astropy.units.quantity import Quantity
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['Constant', 'EMConstant']
class ConstantMeta(type):
"""Metaclass for `~astropy.constants.Constant`. The primary purpose of this
is to wrap the double-underscore methods of `~astropy.units.Quantity`
which is the superclass of `~astropy.constants.Constant`.
    In particular this wraps the operator overloads such as `__add__` to
    prevent constants such as ``e`` from being used in expressions without
    specifying a system of units. The wrapper checks to see if the constant
    is listed (by name) in ``Constant._has_incompatible_units``, a set of
    those constants whose definitions in different systems of units are
    physically incompatible. It also performs this check on each `Constant` if
it hasn't already been performed (the check is deferred until the
`Constant` is actually used in an expression to speed up import times,
among other reasons).
"""
def __new__(mcls, name, bases, d):
def wrap(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
name_lower = self.name.lower()
instances = self._registry[name_lower]
if not self._checked_units:
for inst in instances.values():
try:
self.unit.to(inst.unit)
except UnitsError:
self._has_incompatible_units.add(name_lower)
self._checked_units = True
if (not self.system and
name_lower in self._has_incompatible_units):
systems = sorted(x for x in instances if x)
raise TypeError(
'Constant {!r} does not have physically compatible '
'units across all systems of units and cannot be '
'combined with other values without specifying a '
'system (eg. {}.{})'.format(self.abbrev, self.abbrev,
systems[0]))
return meth(self, *args, **kwargs)
return wrapper
# The wrapper applies to so many of the __ methods that it's easier to
# just exclude the ones it doesn't apply to
exclude = {'__new__', '__array_finalize__', '__array_wrap__',
'__dir__', '__getattr__', '__init__', '__str__',
'__repr__', '__hash__', '__iter__', '__getitem__',
'__len__', '__bool__', '__quantity_subclass__',
'__setstate__'}
for attr, value in vars(Quantity).items():
if (isinstance(value, types.FunctionType) and
attr.startswith('__') and attr.endswith('__') and
attr not in exclude):
d[attr] = wrap(value)
return super().__new__(mcls, name, bases, d)
class Constant(Quantity, metaclass=ConstantMeta):
"""A physical or astronomical constant.
These objects are quantities that are meant to represent physical
constants.
Parameters
----------
abbrev : str
A typical ASCII text abbreviation of the constant, generally
the same as the Python variable used for this constant.
name : str
Full constant name.
value : numbers.Real
Constant value. Note that this should be a bare number, not a
|Quantity|.
unit : str
String representation of the constant units.
uncertainty : numbers.Real
Absolute uncertainty in constant value. Note that this should be
a bare number, not a |Quantity|.
reference : str, optional
Reference where the value is taken from.
system : str
System of units in which the constant is defined. This can be
`None` when the constant's units can be directly converted
between systems.
"""
_registry = {}
_has_incompatible_units = set()
def __new__(cls, abbrev, name, value, unit, uncertainty,
reference=None, system=None):
if reference is None:
reference = getattr(cls, 'default_reference', None)
if reference is None:
raise TypeError(f"{cls} requires a reference.")
name_lower = name.lower()
instances = cls._registry.setdefault(name_lower, {})
# By-pass Quantity initialization, since units may not yet be
# initialized here, and we store the unit in string form.
inst = np.array(value).view(cls)
if system in instances:
warnings.warn('Constant {!r} already has a definition in the '
'{!r} system from {!r} reference'.format(
name, system, reference), AstropyUserWarning)
for c in instances.values():
if system is not None and not hasattr(c.__class__, system):
setattr(c, system, inst)
if c.system is not None and not hasattr(inst.__class__, c.system):
setattr(inst, c.system, c)
instances[system] = inst
inst._abbrev = abbrev
inst._name = name
inst._value = value
inst._unit_string = unit
inst._uncertainty = uncertainty
inst._reference = reference
inst._system = system
inst._checked_units = False
return inst
def __repr__(self):
return ('<{} name={!r} value={} uncertainty={} unit={!r} '
'reference={!r}>'.format(self.__class__, self.name, self.value,
self.uncertainty, str(self.unit),
self.reference))
def __str__(self):
return (' Name = {}\n'
' Value = {}\n'
' Uncertainty = {}\n'
' Unit = {}\n'
' Reference = {}'.format(self.name, self.value,
self.uncertainty, self.unit,
self.reference))
def __quantity_subclass__(self, unit):
return super().__quantity_subclass__(unit)[0], False
def copy(self):
"""
Return a copy of this `Constant` instance. Since they are by
definition immutable, this merely returns another reference to
``self``.
"""
return self
__deepcopy__ = __copy__ = copy
@property
def abbrev(self):
"""A typical ASCII text abbreviation of the constant, also generally
the same as the Python variable used for this constant.
"""
return self._abbrev
@property
def name(self):
"""The full name of the constant."""
return self._name
@lazyproperty
def _unit(self):
"""The unit(s) in which this constant is defined."""
return Unit(self._unit_string)
@property
def uncertainty(self):
"""The known absolute uncertainty in this constant's value."""
return self._uncertainty
@property
def reference(self):
"""The source used for the value of this constant."""
return self._reference
@property
def system(self):
"""The system of units in which this constant is defined (typically
`None` so long as the constant's units can be directly converted
between systems).
"""
return self._system
def _instance_or_super(self, key):
instances = self._registry[self.name.lower()]
inst = instances.get(key)
if inst is not None:
return inst
else:
return getattr(super(), key)
@property
def si(self):
"""If the Constant is defined in the SI system return that instance of
the constant, else convert to a Quantity in the appropriate SI units.
"""
return self._instance_or_super('si')
@property
def cgs(self):
"""If the Constant is defined in the CGS system return that instance of
the constant, else convert to a Quantity in the appropriate CGS units.
"""
return self._instance_or_super('cgs')
def __array_finalize__(self, obj):
for attr in ('_abbrev', '_name', '_value', '_unit_string',
'_uncertainty', '_reference', '_system'):
setattr(self, attr, getattr(obj, attr, None))
self._checked_units = getattr(obj, '_checked_units', False)
class EMConstant(Constant):
"""An electromagnetic constant."""
@property
def cgs(self):
"""Overridden for EMConstant to raise a `TypeError`
emphasizing that there are multiple EM extensions to CGS.
"""
raise TypeError("Cannot convert EM constants to cgs because there "
"are different systems for E.M constants within the "
"c.g.s system (ESU, Gaussian, etc.). Instead, "
"directly use the constant with the appropriate "
"suffix (e.g. e.esu, e.gauss, etc.).")
|
faab05af5a2418826ab2182b35a5cd0bf42e4ad760dce9e05ec19bb614c87909 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import abc
import inspect
from typing import TYPE_CHECKING, Any, Mapping, TypeVar
import numpy as np
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.utils.decorators import classproperty
from astropy.utils.metadata import MetaData
from .connect import CosmologyFromFormat, CosmologyRead, CosmologyToFormat, CosmologyWrite
from .parameter import Parameter
if TYPE_CHECKING: # pragma: no cover
from astropy.cosmology.funcs.comparison import _FormatType
# Originally authored by Andrew Becker ([email protected]),
# and modified by Neil Crighton ([email protected]), Roban Kramer
# ([email protected]), and Nathaniel Starkman ([email protected]).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"]
__doctest_requires__ = {} # needed until __getattr__ removed
##############################################################################
# Parameters
# registry of cosmology classes with {key=name : value=class}
_COSMOLOGY_CLASSES = dict()
# typing
_CosmoT = TypeVar("_CosmoT", bound="Cosmology")
_FlatCosmoT = TypeVar("_FlatCosmoT", bound="FlatCosmologyMixin")
##############################################################################
class CosmologyError(Exception):
pass
class Cosmology(metaclass=abc.ABCMeta):
"""Base-class for all Cosmologies.
Parameters
----------
*args
Arguments into the cosmology; used by subclasses, not this base class.
name : str or None (optional, keyword-only)
The name of the cosmology.
meta : dict or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
**kwargs
Arguments into the cosmology; used by subclasses, not this base class.
Notes
-----
Class instances are static -- you cannot (and should not) change the values
of the parameters. That is, all of the above attributes (except meta) are
read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
meta = MetaData()
# Unified I/O object interchange methods
from_format = UnifiedReadWriteMethod(CosmologyFromFormat)
to_format = UnifiedReadWriteMethod(CosmologyToFormat)
# Unified I/O read and write methods
read = UnifiedReadWriteMethod(CosmologyRead)
write = UnifiedReadWriteMethod(CosmologyWrite)
# Parameters
__parameters__: tuple[str, ...] = ()
__all_parameters__: tuple[str, ...] = ()
# ---------------------------------------------------------------
def __init_subclass__(cls):
super().__init_subclass__()
# -------------------
# Parameters
# Get parameters that are still Parameters, either in this class or above.
parameters = []
derived_parameters = []
for n in cls.__parameters__:
p = getattr(cls, n)
if isinstance(p, Parameter):
derived_parameters.append(n) if p.derived else parameters.append(n)
# Add new parameter definitions
for n, v in cls.__dict__.items():
if n in parameters or n.startswith("_") or not isinstance(v, Parameter):
continue
derived_parameters.append(n) if v.derived else parameters.append(n)
# reorder to match signature
ordered = [parameters.pop(parameters.index(n))
for n in cls._init_signature.parameters.keys()
if n in parameters]
parameters = ordered + parameters # place "unordered" at the end
cls.__parameters__ = tuple(parameters)
cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters)
# -------------------
# register as a Cosmology subclass
_COSMOLOGY_CLASSES[cls.__qualname__] = cls
@classproperty(lazy=True)
def _init_signature(cls):
"""Initialization signature (without 'self')."""
# get signature, dropping "self" by taking arguments [1:]
sig = inspect.signature(cls.__init__)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
return sig
# ---------------------------------------------------------------
def __init__(self, name=None, meta=None):
self._name = str(name) if name is not None else name
self.meta.update(meta or {})
@property
def name(self):
"""The name of the Cosmology instance."""
return self._name
@property
@abc.abstractmethod
def is_flat(self):
"""
Return bool; `True` if the cosmology is flat.
This is abstract and must be defined in subclasses.
"""
raise NotImplementedError("is_flat is not implemented")
def clone(self, *, meta=None, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, so ``clone()``
cannot be used to change between flat and non-flat cosmologies.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
            returned instead of a copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
"""
# Quick return check, taking advantage of the Cosmology immutability.
if meta is None and not kwargs:
return self
# There are changed parameter or metadata values.
# The name needs to be changed accordingly, if it wasn't already.
_modname = self.name + " (modified)"
kwargs.setdefault("name", (_modname if self.name is not None else None))
# mix new meta into existing, preferring the former.
meta = meta if meta is not None else {}
new_meta = {**self.meta, **meta}
# Mix kwargs into initial arguments, preferring the former.
new_init = {**self._init_arguments, "meta": new_meta, **kwargs}
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self._init_signature.bind_partial(**new_init)
# Instantiate, respecting args vs kwargs
cloned = type(self)(*ba.args, **ba.kwargs)
# Check if nothing has changed.
# TODO! or should return self?
if (cloned.name == _modname) and not meta and cloned.is_equivalent(self):
cloned._name = self.name
return cloned
@property
def _init_arguments(self):
# parameters
kw = {n: getattr(self, n) for n in self.__parameters__}
# other info
kw["name"] = self.name
kw["meta"] = self.meta
return kw
# ---------------------------------------------------------------
# comparison methods
def is_equivalent(self, other: Any, /, *, format: _FormatType = False) -> bool:
r"""Check equivalence between Cosmologies.
Two cosmologies may be equivalent even if not the same class.
For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``.
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object to which to compare.
format : bool or None or str, optional keyword-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be
equivalent to a Cosmology.
`False` (default) will not allow conversion. `True` or `None` will,
and will use the auto-identification to try to infer the correct
format. A `str` is assumed to be the correct format to use when
converting.
``format`` is broadcast to match the shape of ``other``.
Note that the cosmology arguments are not broadcast against
``format``, so it cannot determine the output shape.
Returns
-------
bool
True if cosmologies are equivalent, False otherwise.
Examples
--------
Two cosmologies may be equivalent even if not of the same class.
        In this example the ``LambdaCDM`` has ``Ode0`` set to the same value
calculated in ``FlatLambdaCDM``.
>>> import astropy.units as u
>>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM
>>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo1.is_equivalent(cosmo2)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmo3.is_equivalent(cosmo2)
False
Also, using the keyword argument, the notion of equivalence is extended
to any Python object that can be converted to a |Cosmology|.
>>> from astropy.cosmology import Planck18
>>> tbl = Planck18.to_format("astropy.table")
>>> Planck18.is_equivalent(tbl, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be
checked with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of
these kinds can still be checked for equivalence, but the correct
format string must be used.
>>> tbl = Planck18.to_format("yaml")
>>> Planck18.is_equivalent(tbl, format="yaml")
True
"""
from .funcs import cosmology_equal
try:
return cosmology_equal(self, other, format=(None, format), allow_equivalent=True)
except Exception:
# `is_equivalent` allows `other` to be any object and returns False
# if `other` cannot be converted to a Cosmology, rather than
# raising an Exception.
return False
def __equiv__(self, other: Any, /) -> bool:
"""Cosmology equivalence. Use ``.is_equivalent()`` for actual check!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
            The object to which to compare.
Returns
-------
bool or `NotImplemented`
`NotImplemented` if ``other`` is from a different class.
`True` if ``other`` is of the same class and has matching parameters
and parameter values.
`False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__equiv__
# Check all parameters in 'other' match those in 'self' and 'other' has
# no extra parameters (latter part should never happen b/c same class)
params_eq = (set(self.__all_parameters__) == set(other.__all_parameters__)
and all(np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__))
return params_eq
def __eq__(self, other: Any, /) -> bool:
"""Check equality between Cosmologies.
Checks the Parameters and immutable fields (i.e. not "meta").
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
            The object to which to compare.
Returns
-------
bool
`True` if Parameters and names are the same, `False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__eq__
eq = (
# non-Parameter checks: name
self.name == other.name
# check all parameters in 'other' match those in 'self' and 'other'
# has no extra parameters (latter part should never happen b/c same
# class) TODO! element-wise when there are array cosmologies
and set(self.__all_parameters__) == set(other.__all_parameters__)
and all(np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__)
)
return eq
# ---------------------------------------------------------------
def __repr__(self):
namelead = f"{self.__class__.__qualname__}("
if self.name is not None:
namelead += f"name=\"{self.name}\", "
# nicely formatted parameters
fmtps = (f'{k}={getattr(self, k)}' for k in self.__parameters__)
return namelead + ", ".join(fmtps) + ")"
def __astropy_table__(self, cls, copy, **kwargs):
"""Return a `~astropy.table.Table` of type ``cls``.
Parameters
----------
cls : type
Astropy ``Table`` class or subclass.
copy : bool
Ignored.
**kwargs : dict, optional
Additional keyword arguments. Passed to ``self.to_format()``.
See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs.
Returns
-------
`astropy.table.Table` or subclass instance
Instance of type ``cls``.
"""
return self.to_format("astropy.table", cls=cls, **kwargs)
class FlatCosmologyMixin(metaclass=abc.ABCMeta):
"""
Mixin class for flat cosmologies. Do NOT instantiate directly.
Note that all instances of ``FlatCosmologyMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatCosmologyMixin``. For example,
    ``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
__all_parameters__: tuple[str, ...]
__parameters__: tuple[str, ...]
def __init_subclass__(cls: type[_FlatCosmoT]) -> None:
super().__init_subclass__()
# Determine the non-flat class.
# This will raise a TypeError if the MRO is inconsistent.
cls.__nonflatclass__
# ===============================================================
@classmethod # TODO! make metaclass-method
def _get_nonflat_cls(cls, kls: type[_CosmoT] | None=None) -> type[Cosmology] | None:
"""Find the corresponding non-flat class.
The class' bases are searched recursively.
Parameters
----------
kls : :class:`astropy.cosmology.Cosmology` class or None, optional
If `None` (default) this class is searched instead of `kls`.
Raises
------
TypeError
If more than one non-flat class is found at the same level of the
            inheritance hierarchy. This is similar to the error normally
            raised by Python for an inconsistent method resolution order.
Returns
-------
type
A :class:`Cosmology` subclass this class inherits from that is not a
:class:`FlatCosmologyMixin` subclass.
"""
_kls = cls if kls is None else kls
# Find non-flat classes
nonflat: set[type[Cosmology]]
nonflat = {b for b in _kls.__bases__
if issubclass(b, Cosmology) and not issubclass(b, FlatCosmologyMixin)}
if not nonflat: # e.g. subclassing FlatLambdaCDM
nonflat = {k for b in _kls.__bases__ if (k := cls._get_nonflat_cls(b)) is not None}
if len(nonflat) > 1:
raise TypeError(
f"cannot create a consistent non-flat class resolution order "
f"for {_kls} with bases {nonflat} at the same inheritance level."
)
if not nonflat: # e.g. FlatFLRWMixin(FlatCosmologyMixin)
return None
return nonflat.pop()
__nonflatclass__ = classproperty(_get_nonflat_cls, lazy=True,
doc="Return the corresponding non-flat class.")
# ===============================================================
@property
def is_flat(self):
"""Return `True`, the cosmology is flat."""
return True
@abc.abstractmethod
def nonflat(self: _FlatCosmoT) -> _CosmoT:
"""Return the equivalent non-flat-class instance of this cosmology."""
def clone(self, *, meta: Mapping | None = None, to_nonflat: bool = False, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool, optional keyword-only
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
            returned instead of a copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone on the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
"""
if to_nonflat:
return self.nonflat.clone(meta=meta, **kwargs)
return super().clone(meta=meta, **kwargs)
# ===============================================================
def __equiv__(self, other):
"""flat-|Cosmology| equivalence.
Use `astropy.cosmology.funcs.cosmology_equal` with
``allow_equivalent=True`` for actual checks!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance
The object to which to compare for equivalence.
Returns
-------
bool or `NotImplemented`
`True` if ``other`` is of the same class / non-flat class (e.g.
            |FlatLambdaCDM| and |LambdaCDM|) and has matching parameters and
parameter values.
`False` if ``other`` is of the same class but has different
parameters.
`NotImplemented` otherwise.
"""
if isinstance(other, FlatCosmologyMixin):
return super().__equiv__(other) # super gets from Cosmology
        # Check if `other` is the non-flat version of this class. This makes
        # the assumption that any further subclass of a flat cosmology keeps
        # the same physics.
if not issubclass(other.__class__, self.__nonflatclass__):
return NotImplemented
        # Check if the parameters are equivalent: all parameters in ``other``
        # match those in ``self`` and ``other`` has no extra parameters.
params_eq = (
set(self.__all_parameters__) == set(other.__all_parameters__) # no extra
# equal
and all(np.all(getattr(self, k) == getattr(other, k))
for k in self.__parameters__)
# flatness check
and other.is_flat
)
return params_eq
# -----------------------------------------------------------------------------
def __getattr__(attr):
from . import flrw
if hasattr(flrw, attr) and attr not in ("__path__", ):
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
f"`astropy.cosmology.core.{attr}` has been moved (since v5.0) and "
f"should be imported as ``from astropy.cosmology import {attr}``."
" In future this will raise an exception.",
AstropyDeprecationWarning
)
return getattr(flrw, attr)
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
db8e84fb9e2f8fa5b11fb39c4e938c4efc9ecaf4f23c8c15d2bd298713e66638 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Cosmological units and equivalencies.
""" # (newline needed for unit summary)
import astropy.units as u
from astropy.units.utils import generate_unit_summary as _generate_unit_summary
__all__ = ["littleh", "redshift",
# redshift equivalencies
"dimensionless_redshift", "with_redshift",
"redshift_distance", "redshift_hubble", "redshift_temperature",
# other equivalencies
"with_H0"]
__doctest_requires__ = {('with_redshift', 'redshift_distance'): ['scipy']}
_ns = globals()
###############################################################################
# Cosmological Units
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit.
redshift = u.def_unit(['redshift'], prefixes=False, namespace=_ns,
doc="Cosmological redshift.", format={'latex': r''})
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit (see
# https://arxiv.org/pdf/1308.4150.pdf for more)
# Also note that h or h100 or h_100 would be a better name, but they either
# conflict or have numbers in them, which is disallowed
littleh = u.def_unit(['littleh'], namespace=_ns, prefixes=False,
doc='Reduced/"dimensionless" Hubble constant',
format={'latex': r'h_{100}'})
###############################################################################
# Equivalencies
def dimensionless_redshift():
"""Allow redshift to be 1-to-1 equivalent to dimensionless.
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the redshift is raised,
and independent of whether it is part of a more complicated unit.
It is similar to u.dimensionless_angles() in this respect.
"""
return u.Equivalency([(redshift, None)], "dimensionless_redshift")
def redshift_distance(cosmology=None, kind="comoving", **atzkw):
"""Convert quantities between redshift and distance.
    Care should be taken not to misinterpret a relativistic, gravitational,
    etc. redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
kind : {'comoving', 'lookback', 'luminosity'} or None, optional
The distance type for the Equivalency.
Note this does NOT include the angular diameter distance as this
distance measure is not monotonic.
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
        Equivalency between redshift and distance.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving")) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
allowed_kinds = ('comoving', 'lookback', 'luminosity')
if kind not in allowed_kinds:
raise ValueError(f"`kind` is not one of {allowed_kinds}")
method = getattr(cosmology, kind + "_distance")
def z_to_distance(z):
"""Redshift to distance."""
return method(z)
def distance_to_z(d):
"""Distance to redshift."""
return z_at_value(method, d << u.Mpc, **atzkw)
return u.Equivalency([(redshift, u.Mpc, z_to_distance, distance_to_z)],
"redshift_distance",
{'cosmology': cosmology, "distance": kind})
def redshift_hubble(cosmology=None, **atzkw):
"""Convert quantities between redshift and Hubble parameter and little-h.
    Care should be taken not to misinterpret a relativistic, gravitational,
    etc. redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and Hubble parameter and little-h unit.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_hubble(z):
"""Redshift to Hubble parameter."""
return cosmology.H(z)
def hubble_to_z(H):
"""Hubble parameter to redshift."""
return z_at_value(cosmology.H, H << (u.km / u.s / u.Mpc), **atzkw)
def z_to_littleh(z):
"""Redshift to :math:`h`-unit Quantity."""
return z_to_hubble(z).to_value(u.km / u.s / u.Mpc) / 100 * littleh
def littleh_to_z(h):
""":math:`h`-unit Quantity to redshift."""
return hubble_to_z(h * 100)
return u.Equivalency([(redshift, u.km / u.s / u.Mpc, z_to_hubble, hubble_to_z),
(redshift, littleh, z_to_littleh, littleh_to_z)],
"redshift_hubble",
{'cosmology': cosmology})
def redshift_temperature(cosmology=None, **atzkw):
"""Convert quantities between redshift and CMB temperature.
    Care should be taken not to misinterpret a relativistic, gravitational,
    etc. redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.K, cu.redshift_temperature(WMAP9))
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_Tcmb(z):
return cosmology.Tcmb(z)
def Tcmb_to_z(T):
return z_at_value(cosmology.Tcmb, T << u.K, **atzkw)
return u.Equivalency([(redshift, u.K, z_to_Tcmb, Tcmb_to_z)],
"redshift_temperature",
{'cosmology': cosmology})
def with_redshift(cosmology=None, *,
distance="comoving", hubble=True, Tcmb=True,
atzkw=None):
"""Convert quantities between measures of cosmological distance.
Note: by default all equivalencies are on and must be explicitly turned off.
    Care should be taken not to misinterpret a relativistic, gravitational,
    etc. redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If `None`, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only)
The type of distance equivalency to create or `None`.
Default is 'comoving'.
hubble : bool (optional, keyword-only)
Whether to create a Hubble parameter <-> redshift equivalency, using
``Cosmology.H``. Default is `True`.
Tcmb : bool (optional, keyword-only)
Whether to create a CMB temperature <-> redshift equivalency, using
``Cosmology.Tcmb``. Default is `True`.
atzkw : dict or None (optional, keyword-only)
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
With equivalencies between redshift and distance / Hubble / temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> equivalency = cu.with_redshift(WMAP9)
>>> z = 1100 * cu.redshift
Redshift to (comoving) distance:
>>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
Redshift to the Hubble parameter:
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
Redshift to CMB temperature:
>>> z.to(u.K, equivalency)
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
atzkw = atzkw if atzkw is not None else {}
equivs = [] # will append as built
# Hubble <-> Redshift
if hubble:
equivs.extend(redshift_hubble(cosmology, **atzkw))
# CMB Temperature <-> Redshift
if Tcmb:
equivs.extend(redshift_temperature(cosmology, **atzkw))
# Distance <-> Redshift, but need to choose which distance
if distance is not None:
equivs.extend(redshift_distance(cosmology, kind=distance, **atzkw))
# -----------
return u.Equivalency(equivs, "with_redshift",
{'cosmology': cosmology,
'distance': distance, 'hubble': hubble, 'Tcmb': Tcmb})
# ===================================================================
def with_H0(H0=None):
"""
Convert between quantities with little-h and the equivalent physical units.
Parameters
----------
H0 : None or `~astropy.units.Quantity` ['frequency']
The value of the Hubble constant to assume. If a
`~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If
`None` (default), use the ``H0`` attribute from
:mod:`~astropy.cosmology.default_cosmology`.
References
----------
For an illuminating discussion on why you may or may not want to use
little-h at all, see https://arxiv.org/pdf/1308.4150.pdf
"""
if H0 is None:
from .realizations import default_cosmology
H0 = default_cosmology.get().H0
h100_val_unit = u.Unit(100 / (H0.to_value((u.km / u.s) / u.Mpc)) * littleh)
return u.Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0})
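# Illustrative sketch (the H0 value is an assumption for the example): a
# distance quoted in Mpc/h becomes a physical distance once H0 is fixed:
#
#     import astropy.units as u
#     H0_70 = 70 * u.km / u.s / u.Mpc
#     (70 * u.Mpc / littleh).to(u.Mpc, with_H0(H0_70))  # <Quantity 100. Mpc>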
# ===================================================================
# Enable the set of default equivalencies.
# If the cosmology package is imported, this is added to the list astropy-wide.
u.add_enabled_equivalencies(dimensionless_redshift())
# =============================================================================
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
if __doc__ is not None:
__doc__ += _generate_unit_summary(_ns)
|
e722e028f9d2e47f4f00df682cba40db5ba4211657542f13341c4c936ef184a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import pathlib
import sys
from typing import Optional, Union
# LOCAL
from astropy.utils.data import get_pkg_data_path
from astropy.utils.decorators import deprecated
from astropy.utils.state import ScienceState
from .core import Cosmology
_COSMOLOGY_DATA_DIR = pathlib.Path(get_pkg_data_path("cosmology", "data", package="astropy"))
available = tuple(sorted(p.stem for p in _COSMOLOGY_DATA_DIR.glob("*.ecsv")))
__all__ = ["available", "default_cosmology"] + list(available)
__doctest_requires__ = {"*": ["scipy"]}
def __getattr__(name):
"""Make specific realizations from data files with lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations`
"""
if name not in available:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
cosmo = Cosmology.read(str(_COSMOLOGY_DATA_DIR / name) + ".ecsv", format="ascii.ecsv")
cosmo.__doc__ = (f"{name} instance of {cosmo.__class__.__qualname__} "
f"cosmology\n(from {cosmo.meta['reference']})")
# Cache in this module so `__getattr__` is only called once per `name`.
setattr(sys.modules[__name__], name, cosmo)
return cosmo
def __dir__():
"""Directory, including lazily-imported objects."""
return __all__
#########################################################################
# The science state below contains the current cosmology.
#########################################################################
class default_cosmology(ScienceState):
"""The default cosmology to use.
To change it::
>>> from astropy.cosmology import default_cosmology, WMAP7
>>> with default_cosmology.set(WMAP7):
... # WMAP7 cosmology in effect
... pass
Or, you may use a string::
>>> with default_cosmology.set('WMAP7'):
... # WMAP7 cosmology in effect
... pass
To get the default cosmology:
>>> default_cosmology.get()
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, ...
"""
_default_value = "Planck18"
_value = "Planck18"
@deprecated("5.0", alternative="get")
@classmethod
def get_cosmology_from_string(cls, arg):
"""Return a cosmology instance from a string."""
if arg == "no_default":
value = None
else:
value = cls._get_from_registry(arg)
return value
@classmethod
def validate(cls, value: Union[Cosmology, str, None]) -> Optional[Cosmology]:
"""Return a Cosmology given a value.
Parameters
----------
value : None, str, or `~astropy.cosmology.Cosmology`
Returns
-------
`~astropy.cosmology.Cosmology` instance
Raises
------
TypeError
If ``value`` is not a string or |Cosmology|.
"""
# None -> default
if value is None:
value = cls._default_value
# Parse to Cosmology. Error if cannot.
if isinstance(value, str):
# special-case one string
if value == "no_default":
value = None
else:
value = cls._get_from_registry(value)
elif not isinstance(value, Cosmology):
raise TypeError("default_cosmology must be a string or Cosmology instance, "
f"not {value}.")
return value
@classmethod
def _get_from_registry(cls, name: str) -> Cosmology:
"""Get a registered Cosmology realization.
Parameters
----------
name : str
The built-in |Cosmology| realization to retrieve.
Returns
-------
`astropy.cosmology.Cosmology`
The cosmology realization of `name`.
Raises
------
ValueError
If ``name`` is a str, but not for a built-in Cosmology.
TypeError
If ``name`` is for a non-Cosmology object.
"""
try:
value = getattr(sys.modules[__name__], name)
except AttributeError:
raise ValueError(f"Unknown cosmology {name!r}. "
f"Valid cosmologies:\n{available}")
if not isinstance(value, Cosmology):
raise TypeError(f"cannot find a Cosmology realization called {name}.")
return value
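# Illustrative sketch: ``validate`` is what ``ScienceState.set`` calls, so
# strings, Cosmology instances, and None all normalize to a realization:
#
#     default_cosmology.validate(None).name     # 'Planck18' (the default)
#     default_cosmology.validate('WMAP9').name  # 'WMAP9'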
|
ac10eb76a4b9fa6607c87b0965e56fa5655546202883defc64ecf115a91b78a7 | from numpy.testing import assert_allclose
from astropy.stats.info_theory import bayesian_info_criterion, bayesian_info_criterion_lsq
from astropy.stats.info_theory import akaike_info_criterion, akaike_info_criterion_lsq
def test_bayesian_info_criterion():
# This test is from an example presented in Ref [1]
lnL = (-176.4, -173.0)
n_params = (2, 3)
n_samples = 100
answer = 2.195
bic_g = bayesian_info_criterion(lnL[0], n_params[0], n_samples)
bic_t = bayesian_info_criterion(lnL[1], n_params[1], n_samples)
assert_allclose(answer, bic_g - bic_t, atol=1e-1)
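# Sanity check of the figures above, assuming the standard definition
# BIC = k*ln(n) - 2*lnL (which these numbers reproduce):
#   bic_g = 2*ln(100) + 352.8 ~ 362.010
#   bic_t = 3*ln(100) + 346.0 ~ 359.816  ->  bic_g - bic_t ~ 2.195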
def test_akaike_info_criterion():
# This test is from an example presented in Ref [2]
n_samples = 121
lnL = (-3.54, -4.17)
n_params = (6, 5)
answer = 0.95
aic_1 = akaike_info_criterion(lnL[0], n_params[0], n_samples)
aic_2 = akaike_info_criterion(lnL[1], n_params[1], n_samples)
assert_allclose(answer, aic_1 - aic_2, atol=1e-2)
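# Sanity check of the figures above, assuming the small-sample corrected
# criterion AICc = 2*k - 2*lnL + 2*k*(k+1)/(n-k-1) (which these numbers
# reproduce):
#   aic_1 = 12 + 7.08 + 84/114 ~ 19.817
#   aic_2 = 10 + 8.34 + 60/115 ~ 18.862  ->  aic_1 - aic_2 ~ 0.95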
def test_akaike_info_criterion_lsq():
# This test is from an example presented in Ref [1]
n_samples = 100
n_params = (4, 3, 3)
ssr = (25.0, 26.0, 27.0)
answer = (-130.21, -128.46, -124.68)
assert_allclose(answer[0],
akaike_info_criterion_lsq(ssr[0], n_params[0], n_samples),
atol=1e-2)
assert_allclose(answer[1],
akaike_info_criterion_lsq(ssr[1], n_params[1], n_samples),
atol=1e-2)
assert_allclose(answer[2],
akaike_info_criterion_lsq(ssr[2], n_params[2], n_samples),
atol=1e-2)
def test_bayesian_info_criterion_lsq():
"""This test is from:
http://www.statoek.wiso.uni-goettingen.de/veranstaltungen/non_semi_models/
AkaikeLsg.pdf
Note that in there, they compute a "normalized BIC". Therefore, the
answers presented here are recalculated versions based on their values.
"""
n_samples = 25
n_params = (1, 2, 1)
ssr = (48959, 32512, 37980)
answer = (192.706, 185.706, 186.360)
assert_allclose(answer[0], bayesian_info_criterion_lsq(ssr[0],
n_params[0],
n_samples),
atol=1e-2)
assert_allclose(answer[1], bayesian_info_criterion_lsq(ssr[1],
n_params[1],
n_samples),
atol=1e-2)
assert_allclose(answer[2], bayesian_info_criterion_lsq(ssr[2],
n_params[2],
n_samples),
atol=1e-2)
|
7debf4a4f9e4cd4afc688b76f05d4173786af3b6d840a93df478dccc73dcaca0 | import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from astropy import units as u
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.stats.circstats import _length, circmean, circvar, circmoment, circcorrcoef
from astropy.stats.circstats import rayleightest, vtest, vonmisesmle
def test__length():
# testing against R CircStats package
# Ref. [1] pages 6 and 125
weights = np.array([12, 1, 6, 1, 2, 1, 1])
answer = 0.766282
data = np.array([0, 3.6, 36, 72, 108, 169.2, 324])*u.deg
assert_allclose(answer, _length(data, weights=weights), atol=1e-4)
def test_circmean():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
answer = 48.63*u.deg
assert_equal(answer, np.around(circmean(data), 2))
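# The circular mean tested above is theta_bar =
# atan2(sum(sin(theta_i)), sum(cos(theta_i))); for these angles it gives
# ~48.63 deg even though the plain arithmetic mean is ~109.3 deg, because
# 358 deg wraps around to lie close to 0 deg.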
@pytest.mark.skipif('not HAS_SCIPY')
def test_circmean_against_scipy():
import scipy.stats
# testing against scipy.stats.circmean function
# the data is the same as the test before, but in radians
data = np.array([0.89011792, 1.1693706, 0.6981317, 1.90240888, 0.54105207,
6.24827872])
answer = scipy.stats.circmean(data)
assert_equal(np.around(answer, 2), np.around(circmean(data), 2))
def test_circvar():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
answer = 0.1635635
assert_allclose(answer, circvar(data), atol=1e-4)
def test_circmoment():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
# 2nd, 3rd, and 4th moments
# this is the answer given in Ref[1] in radians
answer = np.array([1.588121, 1.963919, 2.685556])
answer = np.around(np.rad2deg(answer)*u.deg, 4)
result = (np.around(circmoment(data, p=2)[0], 4),
np.around(circmoment(data, p=3)[0], 4),
np.around(circmoment(data, p=4)[0], 4))
assert_equal(answer[0], result[0])
assert_equal(answer[1], result[1])
assert_equal(answer[2], result[2])
# testing lengths
answer = np.array([0.4800428, 0.236541, 0.2255761])
assert_allclose(answer, (circmoment(data, p=2)[1],
circmoment(data, p=3)[1],
circmoment(data, p=4)[1]), atol=1e-4)
def test_circcorrcoef():
# testing against R CircStats package
# Ref[1], page 180
alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302, 324,
85, 324, 340, 157, 238, 254, 146, 232, 122, 329])*u.deg
beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94, 45,
47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
answer = 0.2704648
assert_allclose(answer, circcorrcoef(alpha, beta), atol=1e-4)
def test_rayleightest():
# testing against R CircStats package
data = np.array([190.18, 175.48, 155.95, 217.83, 156.36])*u.deg
# answer was obtained through R CircStats function r.test(x)
answer = (0.00640418, 0.9202565)
result = (rayleightest(data), _length(data))
assert_allclose(answer[0], result[0], atol=1e-4)
assert_allclose(answer[1], result[1], atol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_vtest():
# testing against R CircStats package
data = np.array([190.18, 175.48, 155.95, 217.83, 156.36])*u.deg
# answer was obtained through R CircStats function v0.test(x)
answer = 0.9994725
assert_allclose(answer, vtest(data), atol=1e-5)
def test_vonmisesmle():
# testing against R CircStats package
# testing non-Quantity
data = np.array([3.3699057, 4.0411630, 0.5014477, 2.6223103, 3.7336524,
1.8136389, 4.1566039, 2.7806317, 2.4672173,
2.8493644])
# answer was obtained through R CircStats function vm.ml(x)
answer = (3.006514, 1.474132)
assert_allclose(answer[0], vonmisesmle(data)[0], atol=1e-5)
assert_allclose(answer[1], vonmisesmle(data)[1], atol=1e-5)
# testing with Quantity
data = np.rad2deg(data)*u.deg
answer = np.rad2deg(3.006514)*u.deg
assert_equal(np.around(answer, 3), np.around(vonmisesmle(data)[0], 3))
|
15938acb3d0bd6fee679776d69f94131a53762283338ac46552fed25617c54df | import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.stats.spatial import RipleysKEstimator
from astropy.utils.misc import NumpyRNGContext
a = np.array([[1, 4], [2, 5], [3, 6]])
b = np.array([[-1, 1], [-2, 2], [-3, 3]])
@pytest.mark.parametrize("points, x_min, x_max", [(a, 0, 10), (b, -5, 5)])
def test_ripley_K_implementation(points, x_min, x_max):
"""
Test against Ripley's K function implemented in R package `spatstat`
+-+---------+---------+----------+---------+-+
6 + * +
| |
| |
5.5 + +
| |
| |
5 + * +
| |
4.5 + +
| |
| |
4 + * +
+-+---------+---------+----------+---------+-+
1 1.5 2 2.5 3
+-+---------+---------+----------+---------+-+
3 + * +
| |
| |
2.5 + +
| |
| |
2 + * +
| |
1.5 + +
| |
| |
1 + * +
+-+---------+---------+----------+---------+-+
-3 -2.5 -2 -1.5 -1
"""
area = 100
r = np.linspace(0, 2.5, 5)
Kest = RipleysKEstimator(area=area, x_min=x_min, y_min=x_min, x_max=x_max,
y_max=x_max)
ANS_NONE = np.array([0, 0, 0, 66.667, 66.667])
assert_allclose(ANS_NONE, Kest(data=points, radii=r, mode='none'),
atol=1e-3)
ANS_TRANS = np.array([0, 0, 0, 82.304, 82.304])
assert_allclose(ANS_TRANS, Kest(data=points, radii=r, mode='translation'),
atol=1e-3)
with NumpyRNGContext(123):
a = np.random.uniform(low=5, high=10, size=(100, 2))
b = np.random.uniform(low=-5, high=-10, size=(100, 2))
@pytest.mark.parametrize("points", [a, b])
def test_ripley_uniform_property(points):
# Ripley's K function without edge-correction converges to the area when
# the number of points and the argument radii are large enough, i.e.,
# K(x) --> area as x --> inf
area = 50
Kest = RipleysKEstimator(area=area)
r = np.linspace(0, 20, 5)
assert_allclose(area, Kest(data=points, radii=r, mode='none')[4])
with NumpyRNGContext(123):
a = np.random.uniform(low=0, high=1, size=(500, 2))
b = np.random.uniform(low=-1, high=0, size=(500, 2))
@pytest.mark.parametrize("points, low, high", [(a, 0, 1), (b, -1, 0)])
def test_ripley_large_density(points, low, high):
Kest = RipleysKEstimator(area=1, x_min=low, x_max=high, y_min=low,
y_max=high)
r = np.linspace(0, 0.25, 25)
Kpos = Kest.poisson(r)
modes = ['ohser', 'translation', 'ripley']
for m in modes:
Kest_r = Kest(data=points, radii=r, mode=m)
assert_allclose(Kpos, Kest_r, atol=1e-1)
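# For reference: ``Kest.poisson(r)`` is the closed-form baseline
# K(r) = pi * r**2 for a homogeneous Poisson process (complete spatial
# randomness), which the edge-corrected estimators above should approach.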
with NumpyRNGContext(123):
a = np.random.uniform(low=5, high=10, size=(500, 2))
b = np.random.uniform(low=-10, high=-5, size=(500, 2))
@pytest.mark.parametrize("points, low, high", [(a, 5, 10), (b, -10, -5)])
def test_ripley_modes(points, low, high):
Kest = RipleysKEstimator(area=25, x_max=high, y_max=high, x_min=low,
y_min=low)
r = np.linspace(0, 1.2, 25)
Kpos_mean = np.mean(Kest.poisson(r))
modes = ['ohser', 'translation', 'ripley']
for m in modes:
Kest_mean = np.mean(Kest(data=points, radii=r, mode=m))
assert_allclose(Kpos_mean, Kest_mean, atol=1e-1, rtol=1e-1)
with NumpyRNGContext(123):
a = np.random.uniform(low=0, high=1, size=(50, 2))
b = np.random.uniform(low=-1, high=0, size=(50, 2))
@pytest.mark.parametrize("points, low, high", [(a, 0, 1), (b, -1, 0)])
def test_ripley_large_density_var_width(points, low, high):
Kest = RipleysKEstimator(area=1, x_min=low, x_max=high, y_min=low,
y_max=high)
r = np.linspace(0, 0.25, 25)
Kpos = Kest.poisson(r)
Kest_r = Kest(data=points, radii=r, mode='var-width')
assert_allclose(Kpos, Kest_r, atol=1e-1)
with NumpyRNGContext(123):
a = np.random.uniform(low=5, high=10, size=(50, 2))
b = np.random.uniform(low=-10, high=-5, size=(50, 2))
@pytest.mark.parametrize("points, low, high", [(a, 5, 10), (b, -10, -5)])
def test_ripley_var_width(points, low, high):
Kest = RipleysKEstimator(area=25, x_max=high, y_max=high, x_min=low,
y_min=low)
r = np.linspace(0, 1.2, 25)
Kest_ohser = np.mean(Kest(data=points, radii=r, mode='ohser'))
Kest_var_width = np.mean(Kest(data=points, radii=r, mode='var-width'))
assert_allclose(Kest_ohser, Kest_var_width, atol=1e-1, rtol=1e-1)
|
faaee3d141a81405106d57477f1ebbde3e1d7277b23e59aec518bd331453073f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``showtable`` is a command-line script based on ``astropy.io`` and
``astropy.table`` for printing ASCII, FITS, HDF5 or VOTable files(s) to the
standard output.
Example usage of ``showtable``:
1. FITS::
$ showtable astropy/io/fits/tests/data/table.fits
target V_mag
------- -----
NGC1001 11.1
NGC1002 12.3
NGC1003 15.2
2. ASCII::
$ showtable astropy/io/ascii/tests/t/simple_csv.csv
a b c
--- --- ---
1 2 3
4 5 6
3. XML::
$ showtable astropy/io/votable/tests/data/names.xml --max-width 70
col1 col2 col3 ... col15 col16 col17
--- deg deg ... mag mag ---
------------------------- -------- ------- ... ----- ----- -----
SSTGLMC G000.0000+00.1611 0.0000 0.1611 ... -- -- AA
4. Print all the FITS tables in the current directory::
$ showtable *.fits
"""
import argparse
import textwrap
import warnings
from astropy import log
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning
def showtable(filename, args):
"""
Read a table and print to the standard output.
Parameters
----------
filename : str
        The path to the input table file.
    args : argparse.Namespace
        The parsed command-line arguments.
"""
if args.info and args.stats:
warnings.warn('--info and --stats cannot be used together',
AstropyUserWarning)
if (any((args.max_lines, args.max_width, args.hide_unit, args.show_dtype))
and (args.info or args.stats)):
warnings.warn('print parameters are ignored if --info or --stats is '
'used', AstropyUserWarning)
# these parameters are passed to Table.read if they are specified in the
# command-line
read_kwargs = ('hdu', 'format', 'table_id', 'delimiter')
kwargs = {k: v for k, v in vars(args).items()
if k in read_kwargs and v is not None}
try:
table = Table.read(filename, **kwargs)
if args.info:
table.info('attributes')
elif args.stats:
table.info('stats')
else:
formatter = table.more if args.more else table.pprint
formatter(max_lines=args.max_lines, max_width=args.max_width,
show_unit=(False if args.hide_unit else None),
show_dtype=(True if args.show_dtype else None))
except OSError as e:
log.error(str(e))
def main(args=None):
"""The main function called by the `showtable` script."""
parser = argparse.ArgumentParser(
description=textwrap.dedent("""
Print tables from ASCII, FITS, HDF5, VOTable file(s). The tables
are read with 'astropy.table.Table.read' and are printed with
'astropy.table.Table.pprint'. The default behavior is to make the
table output fit onto a single screen page. For a long and wide
table this will mean cutting out inner rows and columns. To print
**all** the rows or columns use ``--max-lines=-1`` or
        ``--max-width=-1``, respectively. The complete list of supported
formats can be found at
http://astropy.readthedocs.io/en/latest/io/unified.html#built-in-table-readers-writers
"""))
addarg = parser.add_argument
addarg('filename', nargs='+', help='path to one or more files')
addarg('--format', help='input table format, should be specified if it '
'cannot be automatically detected')
addarg('--more', action='store_true',
help='use the pager mode from Table.more')
addarg('--info', action='store_true',
help='show information about the table columns')
addarg('--stats', action='store_true',
help='show statistics about the table columns')
# pprint arguments
pprint_args = parser.add_argument_group('pprint arguments')
addarg = pprint_args.add_argument
addarg('--max-lines', type=int,
help='maximum number of lines in table output (default=screen '
'length, -1 for no limit)')
addarg('--max-width', type=int,
help='maximum width in table output (default=screen width, '
'-1 for no limit)')
addarg('--hide-unit', action='store_true',
help='hide the header row for unit (which is shown '
                'only if one or more columns have a unit)')
addarg('--show-dtype', action='store_true',
help='always include a header row for column dtypes '
'(otherwise shown only if any column is multidimensional)')
# ASCII-specific arguments
ascii_args = parser.add_argument_group('ASCII arguments')
addarg = ascii_args.add_argument
addarg('--delimiter', help='column delimiter string')
# FITS-specific arguments
fits_args = parser.add_argument_group('FITS arguments')
addarg = fits_args.add_argument
addarg('--hdu', help='name of the HDU to show')
# HDF5-specific arguments
hdf5_args = parser.add_argument_group('HDF5 arguments')
addarg = hdf5_args.add_argument
addarg('--path', help='the path from which to read the table')
# VOTable-specific arguments
votable_args = parser.add_argument_group('VOTable arguments')
addarg = votable_args.add_argument
addarg('--table-id', help='the table to read in')
args = parser.parse_args(args)
for idx, filename in enumerate(args.filename):
if idx > 0:
print()
showtable(filename, args)
|
16e2e39f0e65a6403b4df9abba31be76237a0fd1e427dbb860ffec9b8d556264 | import numpy as np
from astropy.table import np_utils
def test_common_dtype():
"""
Test that allowed combinations are those expected.
"""
dtype = [('int', int),
('uint8', np.uint8),
('float32', np.float32),
('float64', np.float64),
('str', 'S2'),
('uni', 'U2'),
('bool', bool),
('object', np.object_)]
arr = np.empty(1, dtype=dtype)
fail = set()
succeed = set()
for name1, type1 in dtype:
for name2, type2 in dtype:
try:
np_utils.common_dtype([arr[name1], arr[name2]])
succeed.add(f'{name1} {name2}')
except np_utils.TableMergeError:
fail.add(f'{name1} {name2}')
# known bad combinations
bad = {'str int', 'str bool', 'uint8 bool', 'uint8 str', 'object float32',
'bool object', 'uni uint8', 'int str', 'bool str', 'bool float64',
'bool uni', 'str float32', 'uni float64', 'uni object', 'bool uint8',
'object float64', 'float32 bool', 'str uint8', 'uni bool', 'float64 bool',
'float64 object', 'int bool', 'uni int', 'uint8 object', 'int uni', 'uint8 uni',
'float32 uni', 'object uni', 'bool float32', 'uni float32', 'object str',
'int object', 'str float64', 'object int', 'float64 uni', 'bool int',
'object bool', 'object uint8', 'float32 object', 'str object', 'float64 str',
'float32 str'}
assert fail == bad
good = {'float64 int', 'int int', 'uint8 float64', 'uint8 int', 'str uni',
'float32 float32', 'float64 float64', 'float64 uint8', 'float64 float32',
'int uint8', 'int float32', 'uni str', 'int float64', 'uint8 float32',
'float32 int', 'float32 uint8', 'bool bool', 'uint8 uint8', 'str str',
'float32 float64', 'object object', 'uni uni'}
assert succeed == good
|
36781491e5b369591376e0731e778eb57bb11553caec5352be77a05d352a5466 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.tests.test_metadata import MetaBaseTest
import gc
import os
import sys
import copy
from io import StringIO
from collections import OrderedDict
import pathlib
import pickle
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io import fits
from astropy.table import (Table, QTable, Column, MaskedColumn, TableReplaceWarning,
TableAttribute)
from astropy.tests.helper import assert_follows_unicode_guidelines
from astropy.coordinates import SkyCoord
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy import table
from astropy import units as u
from astropy.time import Time, TimeDelta
from .conftest import MaskedTable, MIXIN_COLS
from astropy.utils.compat.optional_deps import HAS_PANDAS # noqa
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmpdir):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv('HOME', str(tmpdir))
# For Windows
monkeypatch.setenv('USERPROFILE', str(tmpdir))
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, '_a'):
self._a = self._column_type(
[1, 2, 3], name='a', format='%d',
meta={'aa': [0, 1, 2, 3, 4]})
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(
[4, 5, 6], name='b', format='%d', meta={'aa': 1})
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type([7, 8, 9], 'c')
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type([7, 8, 7], 'd')
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, '_obj'):
self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O')
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures('table_types')
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t['a'][0] == 1
assert t['a'][1] == 20
assert t['a'][2] == 3
assert t['b'][0] == 4
assert t['b'][1] == 21
assert t['b'][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ('abc', 'def')
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t['aa'] == self.a)
assert t.colnames == ['aa']
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = np.array([1, 2, 3]) * u.m
assert np.all(t['aa'] == np.array([1, 2, 3]))
assert t['aa'].unit == u.m
t['bb'] = 3 * u.m
assert np.all(t['bb'] == 3)
assert t['bb'].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t['bb'] = self.b
assert np.all(t['bb'] == self.b)
assert t.colnames == ['a', 'bb']
assert t['bb'].meta == self.b.meta
assert t['bb'].format == self.b.format
# Add another column
t['c'] = t['a']
assert np.all(t['c'] == t['a'])
assert t.colnames == ['a', 'bb', 'c']
assert t['c'].meta == t['a'].meta
assert t['c'].format == t['a'].format
# Add a multi-dimensional column
t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t['d'].shape == (3, 2, 2)
assert t['d'][0, 0, 1] == 1
# Add column from a list
t['e'] = ['hello', 'the', 'world']
assert np.all(t['e'] == np.array(['hello', 'the', 'world']))
# Make sure setting existing column still works
t['e'] = ['world', 'hello', 'the']
assert np.all(t['e'] == np.array(['world', 'hello', 'the']))
# Add a column via broadcasting
t['f'] = 10
assert np.all(t['f'] == 10)
# Add a column from a Quantity
t['g'] = np.array([1, 2, 3]) * u.m
assert np.all(t['g'].data == np.array([1, 2, 3]))
assert t['g'].unit == u.m
# Add a column from a (scalar) Quantity
t['g'] = 3 * u.m
assert np.all(t['g'].data == 3)
assert t['g'].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name='b', data=[1, 2, 3]) # unmasked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t['b'] = [1, 2]
@pytest.mark.usefixtures('table_types')
class TestEmptyData():
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, length=100))
assert len(t['a']) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100))
assert len(t['a']) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int))
assert len(t['a']) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4)))
assert len(t['a']) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a')) # dtype is not specified
assert len(t['a']) == 0
def test_scalar(self, table_types):
"""Test related to #3811 where setting empty tables to scalar values
should raise an error instead of having an error raised when accessing
the table."""
t = table_types.Table()
with pytest.raises(TypeError, match='Empty table cannot have column set to scalar value'):
t.add_column(0)
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t['a'] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures('table_types')
class TestNewFromColumns():
def test_simple(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)]
t = table_types.Table(cols)
assert np.all(t['a'].data == np.array([1, 2, 3]))
assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t['b'][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64),
dtype=np.float64),
table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))]
t = table_types.Table(cols)
assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32))
assert type(t['a'][1]) is np.float64
assert type(t['b'][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6, 7])]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name='c')
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, 'd'))
assert t.colnames == ['c', 'd']
t = table_types.Table([c, d])
assert t.colnames == ['c', 'col1']
@pytest.mark.usefixtures('table_types')
class TestReverse():
def test_reverse(self, table_types):
t = table_types.Table([[1, 2, 3],
['a', 'b', 'cc']])
t.reverse()
assert np.all(t['col0'] == np.array([3, 2, 1]))
assert np.all(t['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=False)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=True)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2.sort('col0')
assert np.all(t2['col0'] == np.array([1, 2, 3]))
assert np.all(t2['col1'] == np.array(['a', 'b', 'cc']))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=('x', 'y'))
t.reverse()
assert np.all(t['x'] == x[::-1])
assert np.all(t['y'] == y[::-1])
def test_reverse_mixin(self):
"""Test reverse for a mixin with no item assignment, fix for #9836"""
sc = SkyCoord([1, 2], [3, 4], unit='deg')
t = Table([[2, 1], sc], names=['a', 'sc'])
t.reverse()
assert np.all(t['a'] == [1, 2])
assert np.allclose(t['sc'].ra.to_value('deg'), [2, 1])
@pytest.mark.usefixtures('table_types')
class TestRound():
def test_round_int(self, table_types):
t = table_types.Table([['a', 'b', 'c'],
[1.11, 2.3, 3.0],
[1.123456, 2.9876, 3.901]])
t.round()
assert np.all(t['col0'] == ['a', 'b', 'c'])
assert np.all(t['col1'] == [1., 2., 3.])
assert np.all(t['col2'] == [1., 3., 4.])
def test_round_dict(self, table_types):
t = table_types.Table([['a', 'b', 'c'],
[1.5, 2.5, 3.0111],
[1.123456, 2.9876, 3.901]])
t.round({'col1': 0, 'col2': 3})
assert np.all(t['col0'] == ['a', 'b', 'c'])
assert np.all(t['col1'] == [2.0, 2.0, 3.0])
assert np.all(t['col2'] == [1.123, 2.988, 3.901])
def test_round_invalid(self, table_types):
t = table_types.Table([[1, 2, 3]])
with pytest.raises(ValueError, match="'decimals' argument must be an int or a dict"):
t.round(0.5)
def test_round_kind(self, table_types):
for typecode in 'bBhHiIlLqQpPefdgFDG': # AllInteger, AllFloat
arr = np.array([4, 16], dtype=typecode)
t = Table([arr])
col0 = t['col0']
t.round(decimals=-1) # Round to nearest 10
assert np.all(t['col0'] == [0, 20])
assert t['col0'] is col0
@pytest.mark.usefixtures('table_types')
class TestColumnAccess():
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t['a']
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[1, 2, 3]))
assert np.all(t['a'] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t['b'] # column does not exist
def test_itercols(self, table_types):
names = ['a', 'b', 'c']
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures('table_types')
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short
@pytest.mark.usefixtures('table_types')
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column('b')
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.colnames == ['a', 'b']
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a'))
assert t.colnames == ['b', 'a']
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a') + 1)
assert t.colnames == ['a', 'b']
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column('a') + 1)
t.add_column(self.c, t.index_column('b'))
assert t.colnames == ['a', 'c', 'b']
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column('a')
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.colnames == ['c', 'a', 'b']
@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name='b')
t.add_column(self.b, name='a')
assert t.colnames == ['b', 'a']
# Check that we did not change the name of the input column
assert self.a.info.name == 'a'
assert self.b.info.name == 'b'
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t['a'], name='c')
assert t2.colnames == ['c']
# Check that we did not change the name of the input column
assert t.colnames == ['b', 'a']
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name='c')
assert t.colnames == ['b', 'a', 'c']
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.colnames == ['col0']
@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols['a'], cols['b'], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols['a'])
t2b.add_column(cols['b'])
t2b.add_column(self.c)
t['a'][1] = 20
t['b'][1] = 21
for t2 in [t2a, t2b]:
t2['a'][2] = 10
t2['b'][2] = 11
t2['c'][2] = 12
t2.columns['a'].meta['aa'][3] = 10
assert np.all(t['a'] == np.array([1, 20, 3]))
assert np.all(t['b'] == np.array([4, 21, 6]))
assert np.all(t2['a'] == np.array([1, 2, 10]))
assert np.all(t2['b'] == np.array([4, 5, 11]))
assert np.all(t2['c'] == np.array([7, 8, 12]))
assert t2['a'].name == 'a'
assert t2.columns['a'].meta['aa'][3] == 10
assert t.columns['a'].meta['aa'][3] == 3
@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ['a', 'b', 'c']
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ['a', 'b', 'c', 'd']
assert np.all(t['c'] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ['d', 'a', 'c', 'b']
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ['c', 'd', 'a', 'b']
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ['a', 'b', 'c', 'd']
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
assert t.colnames == ['b', 'c', 'a']
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ['col0', 'col1']
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ['a', 'a_1', 'b', 'c']
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1['a'])
t.add_column(t1['a'], rename_duplicate=True)
t1['a'][0] = 100 # Change original column
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
assert t1.colnames == ['a']
# Check new column didn't change (since name conflict forced a copy)
assert t['a_3'][0] == self.a[0]
# Check that rename_duplicate=True is ok if there are no duplicates
t.add_column(table_types.Column(name='q', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3', 'q']
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
table_types.Column(name='b', data=[0, 1, 2])])
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
table_types.Column(name='b', data=[0, 1, 2])],
rename_duplicate=True)
t.add_column(self.d)
assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type(name='c', data=['7', '8', '9'])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
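        # add_row() with no arguments appends a row of "zero" values for each
        # dtype: zeros for the int array, b'' for the bytes column, and 0 for
        # the object column.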
t.add_row()
assert np.all(t['a'][0] == [0, 0])
assert t['b'][0] == ''
assert t['c'][0] == 0
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['c'][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
t.add_row([[1, 2], 'hello', 'world'])
assert np.all(t['a'][0] == [1, 2])
assert t['b'][0] == 'hello'
assert t['obj'][0] == 'world'
# Make sure it is not repeating last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['obj'][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t['d'] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, '1'))
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, '10'])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '10']))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({'a': 4, 'b': 7.2})
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
else:
assert np.all(t['c'] == np.array(['7', '8', '9', '']))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({'bad_column': 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(['one', 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
        Light testing of the Table.insert_row() method. The deep testing is
        done via the add_row() tests, which call insert_row(index=len(self), ...),
        so here we just test that the ``index`` parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, 'x', [10, 20])
for index in range(-3, 4):
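            # np.insert puts the value 3 (the position of the appended row in
            # t) at `index` within [0, 1, 2], giving the row order expected in
            # t2 after insert_row().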
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == 'f':
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures('table_types')
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns['a']
a[2] = 10
assert t['a'][2] == 10
@pytest.mark.usefixtures('table_types')
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2)
assert t['b'][0].shape == (2, )
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2, 4)
assert t['b'][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t['b'].shape == (3, 2, 4, 6)
assert t['b'][0].shape == (2, 4, 6)
@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, '_t2'):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns('a')
assert self.t.colnames == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray) # noqa
assert (self.t == None).size == 0 # noqa
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns('a')
assert self.t.colnames == ['b']
assert self.t.dtype.names == ('b',)
assert np.all(self.t['b'] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t['new_column'] = self.t['a']
assert 'new_column' in self.t.columns.keys()
self.t.remove_columns('new_column')
assert 'new_column' not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['b'] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([('a', 'int'),
('b', 'int')])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t['a']
assert self.t.colnames == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray) # noqa
assert (self.t == None).size == 0 # noqa
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2['b']
assert self.t2.colnames == ['a', 'c']
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2['a', 'b']
assert self.t2.colnames == ['c']
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t['d']
@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.colnames == []
assert t.as_array().size == 0
# Regression test for gh-8640
assert not t
assert isinstance(t == None, np.ndarray) # noqa
assert (t == None).size == 0 # noqa
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns('b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column('a', 'b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column('a', 'c')
t.rename_column('b', 'a')
assert t.colnames == ['c', 'a']
assert t.dtype.names == ('c', 'a')
if t.masked:
assert t.mask.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t['a'].name = 'c'
t['b'].name = 'a'
assert t.colnames == ['c', 'a']
assert t.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
t.rename_columns(('a', 'b', 'c'), ('aa', 'bb', 'cc'))
assert t.colnames == ['aa', 'bb', 'cc']
t.rename_columns(['bb', 'cc'], ['b', 'c'])
assert t.colnames == ['aa', 'b', 'c']
with pytest.raises(TypeError):
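            # ('aa') is a parenthesized string, not a one-element tuple, so
            # rename_columns receives a plain str where a list of names is
            # expected.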
t.rename_columns(('aa'), ['a'])
with pytest.raises(ValueError):
t.rename_columns(['a'], ['b', 'c'])
@pytest.mark.usefixtures('table_types')
class TestSort():
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a')
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([5, 6, 4]))
assert np.all(t['c'] == np.array([[3, 4],
[1, 2],
[4, 5]]))
t.sort('b')
assert np.all(t['a'] == np.array([3, 1, 2]))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['c'] == np.array([[4, 5],
[3, 4],
[1, 2]]))
@pytest.mark.parametrize('create_index', [False, True])
def test_single_reverse(self, table_types, create_index):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a', reverse=True)
assert np.all(t['a'] == np.array([3, 2, 1]))
assert np.all(t['b'] == np.array([4, 6, 5]))
assert np.all(t['c'] == np.array([[4, 5],
[1, 2],
[3, 4]]))
t.sort('b', reverse=True)
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
assert np.all(t['c'] == np.array([[1, 2],
[3, 4],
[4, 5]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=('x', 'y'))
t.sort('y')
idx = np.argsort(y)
assert np.all(t['x'] == x[idx])
assert np.all(t['y'] == y[idx])
@pytest.mark.parametrize('reverse', [True, False])
def test_empty_reverse(self, table_types, reverse):
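        # Smoke test: just check that sorting an empty table does not raise.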
t = table_types.Table([[], []], dtype=['f4', 'U1'])
t.sort('col1', reverse=reverse)
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'])
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(['b', 'a'])
assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(('a', 'b'))
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'], reverse=True)
assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
t.sort(['b', 'a'], reverse=True)
assert np.all(t['a'] == np.array([2, 3, 1, 3, 1, 2]))
assert np.all(t['b'] == np.array([6, 5, 5, 4, 4, 3]))
t.sort(('a', 'b'), reverse=True)
assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
        assert np.all(t['firstname'] == np.array([b"John", b"Jo", b"Max"]))
        assert np.all(t['name'] == np.array([b"Jackson", b"Miller", b"Miller"]))
        assert np.all(t['tel'] == np.array([19, 15, 12]))
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
        assert np.all(t['firstname'] == np.array(
            [str(x) for x in ["John", "Jo", "Max"]]))
        assert np.all(t['name'] == np.array(
            [str(x) for x in ["Jackson", "Miller", "Miller"]]))
        assert np.all(t['tel'] == np.array([19, 15, 12]))
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort('a')
i1 = t.as_array().argsort(order=['a'])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'])
i1 = t.as_array().argsort(order=['a', 'b'])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
@pytest.mark.parametrize('add_index', [False, True])
def test_argsort_reverse(self, table_types, add_index):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
if add_index:
t.add_index('a')
assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
i0 = t.argsort('a', reverse=True)
i1 = np.array([4, 2, 3, 0, 5, 1])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'], reverse=True)
i1 = np.array([4, 2, 0, 3, 1, 5])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=('a',))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.add_row((2,))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.rename_column('a', 'b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.sort('b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.rename_column('b', 'c')
assert t.colnames == ['c']
assert t.dtype.names == ('c',)
@pytest.mark.parametrize('kwargs', [{}, {'kind': 'stable'}, {'kind': 'quicksort'}])
def test_sort_kind(kwargs):
t = Table()
t['a'] = [2, 1, 3, 2, 3, 1]
t['b'] = [6, 5, 4, 3, 5, 4]
t_struct = t.as_array()
# Since sort calls Table.argsort this covers `kind` for both methods
t.sort(['a', 'b'], **kwargs)
assert np.all(t.as_array() == np.sort(t_struct, **kwargs))
@pytest.mark.usefixtures('table_types')
class TestIterator():
def test_iterator(self, table_types):
d = np.array([(2, 1),
(3, 6),
(4, 5)], dtype=[('a', 'i4'), ('b', 'i4')])
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures('table_types')
class TestSetMeta():
def test_set_meta(self, table_types):
d = table_types.Table(names=('a', 'b'))
d.meta['a'] = 1
d.meta['b'] = 1
d.meta['c'] = 1
d.meta['d'] = 1
assert list(d.meta.keys()) == ['a', 'b', 'c', 'd']
@pytest.mark.usefixtures('table_types')
class TestConvertNumpyArray():
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b'))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[('c', 'i8'), ('d', 'i8')])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = ('>', '<')
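        # Indexing the tuple with a bool: False -> '>' (big), True -> '<'
        # (little), so this selects the native byte-order character.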
native_order = byte_orders[sys.byteorder == 'little']
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8')
t = table_types.Table([col])
arr = t.as_array()
assert arr['a'].dtype.byteorder in (native_order, '=')
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr['a'].dtype.byteorder in (order, '=')
else:
assert arr['a'].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = ('>', '<')[sys.byteorder != 'little']
filename = get_pkg_data_filename('data/tb.fits',
'astropy.io.fits.tests')
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert (data[colname].dtype.byteorder
== arr2[colname].dtype.byteorder)
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True,
meta={'name': 'test'})
    t['x'].mask = [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_values_equal_part1():
col1 = [1, 2]
col2 = [1.0, 2.0]
col3 = ['a', 'b']
t1 = table.Table([col1, col2, col3], names=['a', 'b', 'c'])
t2 = table.Table([col1, col2], names=['a', 'b'])
t3 = table.table_helpers.simple_table()
tm = t1.copy()
tm['time'] = Time([1, 2], format='cxcsec')
tm1 = tm.copy()
tm1['time'][0] = np.ma.masked
tq = table.table_helpers.simple_table()
tq['quantity'] = [1., 2., 3.] * u.m
tsk = table.table_helpers.simple_table()
tsk['sk'] = SkyCoord(1, 2, unit='deg')
eqsk = tsk.values_equal(tsk)
for col in eqsk.itercols():
assert np.all(col)
with pytest.raises(ValueError, match='cannot compare tables with different column names'):
t2.values_equal(t1)
with pytest.raises(ValueError, match='unable to compare column a'):
# Shape mismatch
t3.values_equal(t1)
with pytest.raises(ValueError, match='unable to compare column c'):
# Type mismatch in column c causes FutureWarning
t1.values_equal(2)
with pytest.raises(ValueError, match='unable to compare column c'):
t1.values_equal([1, 2])
eq = t2.values_equal(t2)
for col in eq.colnames:
assert np.all(eq[col] == [True, True])
eq1 = tm1.values_equal(tm)
for col in eq1.colnames:
assert np.all(eq1[col] == [True, True])
eq2 = tq.values_equal(tq)
for col in eq2.colnames:
assert np.all(eq2[col] == [True, True, True])
eq3 = t2.values_equal(2)
for col in eq3.colnames:
assert np.all(eq3[col] == [False, True])
eq4 = t2.values_equal([1, 2])
for col in eq4.colnames:
assert np.all(eq4[col] == [True, True])
# Compare table to its first row
t = table.Table(rows=[(1, 'a'),
(1, 'b')])
eq = t.values_equal(t[0])
assert np.all(eq['col0'] == [True, True])
assert np.all(eq['col1'] == [True, False])
def test_rows_equal():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7'],
format='ascii')
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
def test_equality_masked():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask['a'][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
t = table.Table(t, masked=True)
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance('', bytes):
return
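    # On Python 3 the check above is always False, so the test always runs.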
# Define unicode literals
string_a = 'астрономическая питона'
string_b = 'миллиарды световых лет'
a = table.Table(
[[string_a, 2],
[string_b, 3]],
names=('a', 'b'))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode('utf-8') in bytes(a)
def test_unicode_policy():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize('uni', ['питона', 'ascii'])
def test_unicode_bytestring_conversion(table_types, uni):
"""
Test converting columns to all unicode or all bytestring. This
makes two columns, one which is unicode (str in Py3) and one which
is bytes (UTF-8 encoded). There are two code paths in the conversions,
a faster one where the data are actually ASCII and a slower one where
UTF-8 conversion is required. This tests both via the ``uni`` param.
"""
byt = uni.encode('utf-8')
t = table_types.Table([[byt], [uni], [1]], dtype=('S', 'U', 'i'))
assert t['col0'].dtype.kind == 'S'
assert t['col1'].dtype.kind == 'U'
assert t['col2'].dtype.kind == 'i'
t['col0'].description = 'col0'
t['col1'].description = 'col1'
t['col0'].meta['val'] = 'val0'
t['col1'].meta['val'] = 'val1'
# Unicode to bytestring
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1['col0'].dtype.kind == 'S'
assert t1['col1'].dtype.kind == 'S'
assert t1['col2'].dtype.kind == 'i'
# Meta made it through
assert t1['col0'].description == 'col0'
assert t1['col1'].description == 'col1'
assert t1['col0'].meta['val'] == 'val0'
assert t1['col1'].meta['val'] == 'val1'
# Need to de-fang the automatic unicode sandwiching of Table
assert np.array(t1['col0'])[0] == byt
assert np.array(t1['col1'])[0] == byt
assert np.array(t1['col2'])[0] == 1
# Bytestring to unicode
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1['col0'].dtype.kind == 'U'
assert t1['col1'].dtype.kind == 'U'
assert t1['col2'].dtype.kind == 'i'
# Meta made it through
assert t1['col0'].description == 'col0'
assert t1['col1'].description == 'col1'
assert t1['col0'].meta['val'] == 'val0'
assert t1['col1'].meta['val'] == 'val1'
# No need to de-fang the automatic unicode sandwiching of Table here, but
# do just for consistency to prove things are working.
assert np.array(t1['col0'])[0] == uni
assert np.array(t1['col1'])[0] == uni
assert np.array(t1['col2'])[0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({'a': [1, 2, 3]})
the_id = id(t)
assert t['a'].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=['a'])
out = []
for r1 in t:
for r2 in t:
out.append((r1['a'], r2['a']))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif('not HAS_PANDAS')
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ['<', '>', '=']:
for kind in ['f', 'i']:
for byte in ['2', '4', '8']:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x.newbyteorder(endian)
t['u'] = ['a', 'b', 'c']
t['s'] = ['a', 'b', 'c']
d = t.to_pandas()
for column in t.columns:
if column == 'u':
assert np.all(t['u'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == 's':
assert np.all(t['s'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.isnative:
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
# Explicitly testing little/big/native endian separately -
# regression for a case in astropy/astropy#11286 not caught by #3729.
d[['<i4', '>i4']]
d[['<f4', '>f4']]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ('u', 's'):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.isnative:
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
@pytest.mark.parametrize('unsigned', ['u', ''])
@pytest.mark.parametrize('bits', [8, 16, 32, 64])
def test_nullable_int(self, unsigned, bits):
np_dtype = f'{unsigned}int{bits}'
c = MaskedColumn([1, 2], mask=[False, True], dtype=np_dtype)
t = Table([c])
df = t.to_pandas()
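        # pandas nullable integer dtypes capitalize the kind: 'int64' -> 'Int64',
        # 'uint32' -> 'UInt32'.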
pd_dtype = np_dtype.replace('i', 'I').replace('u', 'U')
assert str(df['col0'].dtype) == pd_dtype
t2 = Table.from_pandas(df)
assert str(t2['col0'].dtype) == np_dtype
assert np.all(t2['col0'].mask == [False, True])
assert np.all(t2['col0'] == c)
def test_2d(self):
t = table.Table()
t['a'] = [1, 2, 3]
t['b'] = np.ones((3, 2))
with pytest.raises(ValueError,
match='Cannot convert a table with multidimensional columns'):
t.to_pandas()
def test_mixin_pandas(self):
t = table.QTable()
for name in sorted(MIXIN_COLS):
if not name.startswith('ndarray'):
t[name] = MIXIN_COLS[name]
t['dt'] = TimeDelta([0, 2, 4, 6], format='sec')
tp = t.to_pandas()
t2 = table.Table.from_pandas(tp)
assert np.allclose(t2['quantity'], [0, 1, 2, 3])
assert np.allclose(t2['longitude'], [0., 1., 5., 6.])
assert np.allclose(t2['latitude'], [5., 6., 10., 11.])
assert np.allclose(t2['skycoord.ra'], [0, 1, 2, 3])
assert np.allclose(t2['skycoord.dec'], [0, 1, 2, 3])
assert np.allclose(t2['arraywrap'], [0, 1, 2, 3])
assert np.allclose(t2['arrayswap'], [0, 1, 2, 3])
assert np.allclose(t2['earthlocation.y'], [0, 110708, 547501, 654527], rtol=0, atol=1)
        # Time and TimeDelta are the only mixins that round-trip their class through pandas
assert isinstance(t2['time'], Time)
assert np.allclose(t2['time'].jyear, [2000, 2001, 2002, 2003])
assert np.all(t2['time'].isot == ['2000-01-01T12:00:00.000',
'2000-12-31T18:00:00.000',
'2002-01-01T00:00:00.000',
'2003-01-01T06:00:00.000'])
assert t2['time'].format == 'isot'
# TimeDelta
assert isinstance(t2['dt'], TimeDelta)
assert np.allclose(t2['dt'].value, [0, 2, 4, 6])
assert t2['dt'].format == 'sec'
@pytest.mark.parametrize('use_IndexedTable', [False, True])
def test_to_pandas_index(self, use_IndexedTable):
"""Test to_pandas() with different indexing options.
This also tests the fix for #12014. The exception seen there is
reproduced here without the fix.
"""
import pandas as pd
class IndexedTable(table.QTable):
"""Always index the first column"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_index(self.colnames[0])
row_index = pd.RangeIndex(0, 2, 1)
tm_index = pd.DatetimeIndex(['1998-01-01', '2002-01-01'],
dtype='datetime64[ns]',
name='tm', freq=None)
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
table_cls = IndexedTable if use_IndexedTable else table.QTable
t = table_cls([tm, x], names=['tm', 'x'])
tp = t.to_pandas()
if not use_IndexedTable:
assert np.all(tp.index == row_index)
tp = t.to_pandas(index='tm')
assert np.all(tp.index == tm_index)
t.add_index('tm')
tp = t.to_pandas()
assert np.all(tp.index == tm_index)
# Make sure writing to pandas didn't hack the original table
assert t['tm'].info.indices
tp = t.to_pandas(index=True)
assert np.all(tp.index == tm_index)
tp = t.to_pandas(index=False)
assert np.all(tp.index == row_index)
with pytest.raises(ValueError) as err:
t.to_pandas(index='not a column')
assert 'index must be None, False' in str(err.value)
def test_mixin_pandas_masked(self):
tm = Time([1, 2, 3], format='cxcsec')
dt = TimeDelta([1, 2, 3], format='sec')
tm[1] = np.ma.masked
dt[1] = np.ma.masked
t = table.QTable([tm, dt], names=['tm', 'dt'])
tp = t.to_pandas()
assert np.all(tp['tm'].isnull() == [False, True, False])
assert np.all(tp['dt'].isnull() == [False, True, False])
t2 = table.Table.from_pandas(tp)
assert np.all(t2['tm'].mask == tm.mask)
assert np.ma.allclose(t2['tm'].jd, tm.jd, rtol=1e-14, atol=1e-14)
assert np.all(t2['dt'].mask == dt.mask)
assert np.ma.allclose(t2['dt'].jd, dt.jd, rtol=1e-14, atol=1e-14)
def test_from_pandas_index(self):
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.Table([tm, x], names=['tm', 'x'])
tp = t.to_pandas(index='tm')
t2 = table.Table.from_pandas(tp)
assert t2.colnames == ['x']
t2 = table.Table.from_pandas(tp, index=True)
assert t2.colnames == ['tm', 'x']
assert np.allclose(t2['tm'].jyear, tm.jyear)
@pytest.mark.parametrize('use_nullable_int', [True, False])
def test_masking(self, use_nullable_int):
t = table.Table(masked=True)
t['a'] = [1, 2, 3]
t['a'].mask = [True, False, True]
t['b'] = [1., 2., 3.]
t['b'].mask = [False, False, True]
t['u'] = ['a', 'b', 'c']
t['u'].mask = [False, True, False]
t['s'] = ['a', 'b', 'c']
t['s'].mask = [False, True, False]
# https://github.com/astropy/astropy/issues/7741
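        # These int64 values exceed 2**53 and would lose precision as float64.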
t['Source'] = [2584290278794471936, 2584290038276303744,
2584288728310999296]
t['Source'].mask = [False, False, False]
if use_nullable_int: # Default
# No warning with the default use_nullable_int=True
d = t.to_pandas(use_nullable_int=use_nullable_int)
else:
with pytest.warns(TableReplaceWarning,
match=r"converted column 'a' from int(32|64) to float64"):
d = t.to_pandas(use_nullable_int=use_nullable_int)
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
if hasattr(t2[name], 'mask'):
assert np.all(column.mask == t2[name].mask)
if column.dtype.kind == 'i':
if np.any(column.mask) and not use_nullable_int:
assert t2[name].dtype.kind == 'f'
else:
assert t2[name].dtype.kind == 'i'
assert_array_equal(column.data,
t2[name].data.astype(column.dtype))
else:
if column.dtype.byteorder in ('=', '|'):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
def test_units(self):
import pandas as pd
import astropy.units as u
df = pd.DataFrame({'x': [1, 2, 3], 't': [1.3, 1.2, 1.8]})
t = table.Table.from_pandas(df, units={'x': u.m, 't': u.s})
assert t['x'].unit == u.m
assert t['t'].unit == u.s
# test error if not a mapping
with pytest.raises(TypeError):
table.Table.from_pandas(df, units=[u.m, u.s])
# test warning is raised if additional columns in units dict
with pytest.warns(UserWarning) as record:
table.Table.from_pandas(df, units={'x': u.m, 't': u.s, 'y': u.m})
assert len(record) == 1
assert "{'y'}" in record[0].message.args[0]
def test_to_pandas_masked_int_data_with__index(self):
data = {"data": [0, 1, 2], "index": [10, 11, 12]}
t = table.Table(data=data, masked=True)
t.add_index("index")
t["data"].mask = [1, 1, 0]
df = t.to_pandas()
assert df["data"].iloc[-1] == 2
@pytest.mark.usefixtures('table_types')
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError,
match=r"Cannot replace column 'a'. Use "
"Table.replace_column.. instead."):
t.columns['a'] = [1, 2, 3]
with pytest.raises(ValueError, match=r"column name not there is not in the table"):
t.replace_column('not there', [1, 2, 3])
with pytest.raises(ValueError, match=r"length of new column must match table length"):
t.replace_column('a', [1, 2])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t['a']
tb = t['b']
vals = [1.2, 3.4, 5.6]
for col in (vals,
table_types.Column(vals),
table_types.Column(vals, name='a'),
table_types.Column(vals, name='b')):
t.replace_column('a', col)
assert np.all(t['a'] == vals)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].meta == {}
assert t['a'].format is None
# Special case: replacing the only column can resize table
del t['b']
assert len(t) == 3
t['a'] = [1, 2]
assert len(t) == 2
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index('a')
with pytest.raises(ValueError) as err:
t.replace_column('a', [1, 2, 3])
assert err.value.args[0] == 'cannot replace a table index column'
def test_replace_column_no_copy(self):
t = Table([[1, 2], [3, 4]], names=['a', 'b'])
a = np.array([1.5, 2.5])
t.replace_column('a', a, copy=False)
assert t['a'][0] == a[0]
t['a'][0] = 10
assert t['a'][0] == a[0]
class TestQTableColumnConversionCornerCases:
def test_replace_with_masked_col_with_units_in_qtable(self):
"""This is a small regression from #8902"""
t = QTable([[1, 2], [3, 4]], names=['a', 'b'])
t['a'] = MaskedColumn([5, 6], unit='m')
assert isinstance(t['a'], u.Quantity)
def test_do_not_replace_string_column_with_units_in_qtable(self):
t = QTable([[1*u.m]])
with pytest.warns(AstropyUserWarning, match='convert it to Quantity failed'):
t['a'] = Column(['a'], unit=u.m)
assert isinstance(t['a'], Column)
class Test__Astropy_Table__():
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9] * u.m]
self.names = ['a', 'b', 'c']
self.meta = OrderedDict([('a', 1), ('b', 2)])
def __astropy_table__(self, cls, copy, **kwargs):
a, b, c = self.columns
c.info.name = 'c'
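            # A bare Quantity has no name; set it via .info so it can act as a column.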
cols = [table.Column(a, name='a'),
table.MaskedColumn(b, name='b'),
c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.Column
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta='extra!')
assert t.colnames == ['a', 'b', 'c']
assert t.meta == {'extra_meta': 'extra!'}
assert np.all(t['a'] == st.columns[0])
assert np.all(t['b'] == st.columns[1])
vals = t['c'].value if table_cls is table.QTable else t['c']
assert np.all(st.columns[2].value == vals)
assert isinstance(t['a'], table.Column)
assert isinstance(t['b'], table.MaskedColumn)
assert isinstance(t['c'], col_c_class)
assert t['c'].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t['a'][0] = 10
                # st.columns[0] is a plain list and always gets copied into the Column,
                # so the source value stays 1 regardless of cpy.
                assert st.columns[0][0] == 1
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ['a', 'b', 'c']
meta = OrderedDict([('c', 3)])
t = table.Table(st, dtype=dtypes, names=names, meta=meta)
assert t.colnames == names
assert all(col.dtype.type is dtype
for col, dtype in zip(t.columns.values(), dtypes))
        # The supplied meta overrides the existing meta. Changed in astropy 3.2.
assert t.meta != st.meta
assert t.meta == meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta='extra!')
assert '__init__() got unexpected keyword argument' in str(err.value)
class TestUpdate():
def _setup(self):
self.a = Column((1, 2, 3), name='a')
self.b = Column((4, 5, 6), name='b')
self.c = Column((7, 8, 9), name='c')
self.d = Column((10, 11, 12), name='d')
def test_different_lengths(self):
self._setup()
t1 = Table([self.a])
t2 = Table([self.b[:-1]])
msg = 'Inconsistent data column lengths'
with pytest.raises(ValueError, match=msg):
t1.update(t2)
# If update didn't succeed then t1 and t2 should not have changed.
assert t1.colnames == ['a']
assert np.all(t1['a'] == self.a)
assert t2.colnames == ['b']
assert np.all(t2['b'] == self.b[:-1])
def test_invalid_inputs(self):
# If input is invalid then nothing should be modified.
self._setup()
t = Table([self.a])
d = {'b': self.b, 'c': [0]}
msg = 'Inconsistent data column lengths: {1, 3}'
with pytest.raises(ValueError, match=msg):
t.update(d)
assert t.colnames == ['a']
assert np.all(t['a'] == self.a)
assert d == {'b': self.b, 'c': [0]}
def test_metadata_conflict(self):
self._setup()
t1 = Table([self.a], meta={'a': 0, 'b': [0], 'c': True})
t2 = Table([self.b], meta={'a': 1, 'b': [1]})
t2meta = copy.deepcopy(t2.meta)
t1.update(t2)
assert t1.meta == {'a': 1, 'b': [0, 1], 'c': True}
# t2 metadata should not have changed.
assert t2.meta == t2meta
def test_update(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
t2['b'] += 1
t1.update(t2)
assert t1.colnames == ['a', 'b', 'c']
assert np.all(t1['a'] == self.a)
assert np.all(t1['b'] == self.b+1)
assert np.all(t1['c'] == self.c)
# t2 should not have changed.
assert t2.colnames == ['b', 'c']
assert np.all(t2['b'] == self.b+1)
assert np.all(t2['c'] == self.c)
d = {'b': list(self.b), 'd': list(self.d)}
dc = copy.deepcopy(d)
t2.update(d)
assert t2.colnames == ['b', 'c', 'd']
assert np.all(t2['b'] == self.b)
assert np.all(t2['c'] == self.c)
assert np.all(t2['d'] == self.d)
# d should not have changed.
assert d == dc
# Columns were copied, so changing t2 shouldn't have affected t1.
assert t1.colnames == ['a', 'b', 'c']
assert np.all(t1['a'] == self.a)
assert np.all(t1['b'] == self.b+1)
assert np.all(t1['c'] == self.c)
def test_update_without_copy(self):
self._setup()
t1 = Table([self.a, self.b])
t2 = Table([self.b, self.c])
t1.update(t2, copy=False)
t2['b'] -= 1
assert t1.colnames == ['a', 'b', 'c']
assert np.all(t1['a'] == self.a)
assert np.all(t1['b'] == self.b-1)
assert np.all(t1['c'] == self.c)
d = {'b': np.array(self.b), 'd': np.array(self.d)}
t2.update(d, copy=False)
d['b'] *= 2
assert t2.colnames == ['b', 'c', 'd']
assert np.all(t2['b'] == 2*self.b)
assert np.all(t2['c'] == self.c)
assert np.all(t2['d'] == self.d)
def test_table_meta_copy():
"""
Test no copy vs light (key) copy vs deep copy of table meta for different
situations. #8404.
"""
t = table.Table([[1]])
meta = {1: [1, 2]}
# Assigning meta directly implies using direct object reference
t.meta = meta
assert t.meta is meta
# Table slice implies key copy, so values are unchanged
t2 = t[:]
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
    assert t2.meta[1] is t.meta[1]  # Value IS the same list object
# Table init with copy=False implies key copy
t2 = table.Table(t, copy=False)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the same list object
# Table init with copy=True implies deep copy
t2 = table.Table(t, copy=True)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object
def test_table_meta_copy_with_meta_arg():
"""
Test no copy vs light (key) copy vs deep copy of table meta when meta is
supplied as a table init argument. #8404.
"""
meta = {1: [1, 2]}
meta2 = {2: [3, 4]}
t = table.Table([[1]], meta=meta, copy=False)
assert t.meta is meta
t = table.Table([[1]], meta=meta) # default copy=True
assert t.meta is not meta
assert t.meta == meta
# Test initializing from existing table with meta with copy=False
t2 = table.Table(t, meta=meta2, copy=False)
assert t2.meta is meta2
    assert t2.meta != t.meta  # Changed behavior in #8404
# Test initializing from existing table with meta with default copy=True
t2 = table.Table(t, meta=meta2)
assert t2.meta is not meta2
    assert t2.meta != t.meta  # Changed behavior in #8404
# Table init with copy=True and empty dict meta gets that empty dict
t2 = table.Table(t, copy=True, meta={})
assert t2.meta == {}
# Table init with copy=True and kwarg meta=None gets the original table dict.
# This is a somewhat ambiguous case because it could be interpreted as the
# user wanting NO meta set on the output. This could be implemented by inspecting
# call args.
t2 = table.Table(t, copy=True, meta=None)
assert t2.meta == t.meta
# Test initializing empty table with meta with copy=False
t = table.Table(meta=meta, copy=False)
assert t.meta is meta
assert t.meta[1] is meta[1]
# Test initializing empty table with meta with default copy=True (deepcopy meta)
t = table.Table(meta=meta)
assert t.meta is not meta
assert t.meta == meta
assert t.meta[1] is not meta[1]
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=['a', 'b'])
ta = t['a']
tb = t['b']
ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
ta.info.format = '%f'
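    # These info attributes belong to the old column and should not survive the replacement.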
t.replace_column('a', a.to('cm'))
assert np.all(t['a'] == ta)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].info.meta is None
assert t['a'].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=['a', 'b'])
assert isinstance(t['a'], u.Quantity)
# Inplace update
ta = t['a']
t['a'] = 5 * u.m
assert np.all(t['a'] == [5, 5] * u.m)
assert t['a'] is ta
# Replace
t['a'] = [5, 6]
assert np.all(t['a'] == [5, 6])
assert isinstance(t['a'], table.Column)
assert t['a'] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
t['a'] = [10, 20, 30] # replace column
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t2 = t[:2]
t2['a'] = 0 # in-place slice update
assert np.all(t['a'] == [0, 0, 3])
with pytest.warns(TableReplaceWarning, match="replaced column 'a' "
"which looks like an array slice") as w:
t2['a'] = [10, 20] # replace slice
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with pytest.warns(TableReplaceWarning, match=r"replaced column 'a' "
r"and column attributes \['unit'\]") as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a'] # noqa : Generate an extra reference to original column
with pytest.warns(TableReplaceWarning, match="replaced column 'a' and the "
"number of references") as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
from inspect import currentframe, getframeinfo
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with table.conf.set_temp('replace_warnings', ['always']):
t['a'] = 0 # in-place slice update
with pytest.warns(TableReplaceWarning, match="replaced column 'a'") as w:
frameinfo = getframeinfo(currentframe())
t['a'] = [10, 20, 30] # replace column
assert len(w) == 1
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert 'test_table' in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
t['a'].unit = 'm'
with table.conf.set_temp('replace_inplace', True):
with table.conf.set_temp('replace_warnings',
['always', 'refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert ta is t['a']
t['a'] = [10, 20, 30] # normally replaces column, but not now
assert ta is t['a']
assert np.all(t['a'] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
t.add_index('a')
original_key = t.primary_key
# can't test if tuples are equal, so just check content
assert original_key[0] == 'a'
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
    # test that the primary_key content is inherited by each new table below
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
'''Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails.'''
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
def test_create_table_from_final_row():
"""Regression test for issue #8422: passing the last row of a table into
Table should return a new table containing that row."""
t1 = table.Table([(1, 2)], names=['col'])
row = t1[-1]
t2 = table.Table(row)['col']
assert t2[0] == 2
def test_key_values_in_as_array():
# Test for checking column slicing using key_values in Table.as_array()
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
# Creating a table with three columns
t1 = table.Table(rows=data_rows, names=('a', 'b', 'c'),
meta={'name': 'first table'},
dtype=('i4', 'f8', 'S1'))
    # Values of the sliced columns a, b are stored in a numpy array
a = np.array([(1, 2.), (4, 5.), (5, 8.2)],
dtype=[('a', '<i4'), ('b', '<f8')])
    # Values of the sliced column c are stored in a numpy array
b = np.array([(b'x',), (b'y',), (b'z',)], dtype=[('c', 'S1')])
# Comparing initialised array with sliced array using Table.as_array()
assert np.array_equal(a, t1.as_array(names=['a', 'b']))
assert np.array_equal(b, t1.as_array(names=['c']))
def test_tolist():
t = table.Table([[1, 2, 3], [1.1, 2.2, 3.3], [b'foo', b'bar', b'hello']],
names=('a', 'b', 'c'))
assert t['a'].tolist() == [1, 2, 3]
assert_array_equal(t['b'].tolist(), [1.1, 2.2, 3.3])
assert t['c'].tolist() == ['foo', 'bar', 'hello']
assert isinstance(t['a'].tolist()[0], int)
assert isinstance(t['b'].tolist()[0], float)
assert isinstance(t['c'].tolist()[0], str)
t = table.Table([[[1, 2], [3, 4]],
[[b'foo', b'bar'], [b'hello', b'world']]],
names=('a', 'c'))
assert t['a'].tolist() == [[1, 2], [3, 4]]
assert t['c'].tolist() == [['foo', 'bar'], ['hello', 'world']]
assert isinstance(t['a'].tolist()[0][0], int)
assert isinstance(t['c'].tolist()[0][0], str)
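# Table subclass defining TableAttribute descriptors: one with no default,
# one with a mutable (list) default, and one with a scalar default.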
class MyTable(Table):
foo = TableAttribute()
bar = TableAttribute(default=[])
baz = TableAttribute(default=1)
def test_table_attribute():
assert repr(MyTable.baz) == '<TableAttribute name=baz default=1>'
t = MyTable([[1, 2]])
# __attributes__ created on the fly on the first access of an attribute
# that has a non-None default.
assert '__attributes__' not in t.meta
assert t.foo is None
assert '__attributes__' not in t.meta
assert t.baz == 1
assert '__attributes__' in t.meta
t.bar.append(2.0)
assert t.bar == [2.0]
assert t.baz == 1
t.baz = 'baz'
assert t.baz == 'baz'
# Table attributes round-trip through pickle
tp = pickle.loads(pickle.dumps(t))
assert tp.foo is None
assert tp.baz == 'baz'
assert tp.bar == [2.0]
# Allow initialization of attributes in table creation, with / without data
for data in None, [[1, 2]]:
t2 = MyTable(data, foo=3, bar='bar', baz='baz')
assert t2.foo == 3
assert t2.bar == 'bar'
assert t2.baz == 'baz'
# Initializing from an existing MyTable works, with and without kwarg attrs
t3 = MyTable(t2)
assert t3.foo == 3
assert t3.bar == 'bar'
assert t3.baz == 'baz'
t3 = MyTable(t2, foo=5, bar='fubar')
assert t3.foo == 5
assert t3.bar == 'fubar'
assert t3.baz == 'baz'
# Deleting attributes removes it from attributes
del t.baz
assert 'baz' not in t.meta['__attributes__']
del t.bar
assert '__attributes__' not in t.meta
def test_table_attribute_ecsv():
# Table attribute round-trip through ECSV
t = MyTable([[1, 2]], bar=[2.0], baz='baz')
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = MyTable.read(out.getvalue(), format='ascii.ecsv')
assert t2.foo is None
assert t2.bar == [2.0]
assert t2.baz == 'baz'
def test_table_attribute_fail():
# Code raises ValueError(f'{attr} not allowed as TableAttribute') but in this
# context it gets re-raised as a RuntimeError during class definition.
with pytest.raises(RuntimeError, match='Error calling __set_name__'):
class MyTable2(Table):
descriptions = TableAttribute() # Conflicts with init arg
with pytest.raises(RuntimeError, match='Error calling __set_name__'):
class MyTable3(Table):
colnames = TableAttribute() # Conflicts with built-in property
def test_set_units_fail():
dat = [[1.0, 2.0], ['aa', 'bb']]
with pytest.raises(ValueError, match='sequence of unit values must match number of columns'):
Table(dat, units=[u.m])
with pytest.raises(ValueError, match='invalid column name c for setting unit attribute'):
Table(dat, units={'c': u.m})
def test_set_units():
dat = [[1.0, 2.0], ['aa', 'bb'], [3, 4]]
exp_units = (u.m, None, None)
for cls in Table, QTable:
for units in ({'a': u.m, 'c': ''}, exp_units):
qt = cls(dat, units=units, names=['a', 'b', 'c'])
if cls is QTable:
assert isinstance(qt['a'], u.Quantity)
assert isinstance(qt['b'], table.Column)
assert isinstance(qt['c'], table.Column)
for col, unit in zip(qt.itercols(), exp_units):
assert col.info.unit is unit
def test_set_descriptions():
dat = [[1.0, 2.0], ['aa', 'bb']]
exp_descriptions = ('my description', None)
for cls in Table, QTable:
for descriptions in ({'a': 'my description'}, exp_descriptions):
qt = cls(dat, descriptions=descriptions, names=['a', 'b'])
for col, description in zip(qt.itercols(), exp_descriptions):
assert col.info.description == description
def test_set_units_from_row():
text = ['a,b',
',s',
'1,2',
'3,4']
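    # Read only the second line (',s') as a one-row table; its Row supplies per-column units.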
units = Table.read(text, format='ascii', data_start=1, data_end=2)[0]
t = Table.read(text, format='ascii', data_start=2, units=units)
assert isinstance(units, table.Row)
assert t['a'].info.unit is None
assert t['b'].info.unit is u.s
def test_set_units_descriptions_read():
"""Test setting units and descriptions via Table.read. The test here
is less comprehensive because the implementation is exactly the same
as for Table.__init__ (calling Table._set_column_attribute) """
for cls in Table, QTable:
t = cls.read(['a b', '1 2'],
format='ascii',
units=[u.m, u.s],
descriptions=['hi', 'there'])
assert t['a'].info.unit is u.m
assert t['b'].info.unit is u.s
assert t['a'].info.description == 'hi'
assert t['b'].info.description == 'there'
def test_broadcasting_8933():
"""Explicitly check re-work of code related to broadcasting in #8933"""
t = table.Table([[1, 2]]) # Length=2 table
t['a'] = [[3, 4]] # Can broadcast if ndim > 1 and shape[0] == 1
t['b'] = 5
t['c'] = [1] # Treat as broadcastable scalar, not length=1 array (which would fail)
assert np.all(t['a'] == [[3, 4], [3, 4]])
assert np.all(t['b'] == [5, 5])
assert np.all(t['c'] == [1, 1])
# Test that broadcasted column is writeable
t['c'][1] = 10
assert np.all(t['c'] == [1, 10])
def test_custom_masked_column_in_nonmasked_table():
"""Test the refactor and change in column upgrades introduced
in 95902650f. This fixes a regression introduced by #8789
(Change behavior of Table regarding masked columns)."""
class MyMaskedColumn(table.MaskedColumn):
pass
class MySubMaskedColumn(MyMaskedColumn):
pass
class MyColumn(table.Column):
pass
class MySubColumn(MyColumn):
pass
class MyTable(table.Table):
Column = MyColumn
MaskedColumn = MyMaskedColumn
a = table.Column([1])
b = table.MaskedColumn([2], mask=[True])
c = MyMaskedColumn([3], mask=[True])
d = MySubColumn([4])
e = MySubMaskedColumn([5], mask=[True])
# Two different pathways for making table
t1 = MyTable([a, b, c, d, e], names=['a', 'b', 'c', 'd', 'e'])
t2 = MyTable()
t2['a'] = a
t2['b'] = b
t2['c'] = c
t2['d'] = d
t2['e'] = e
for t in (t1, t2):
assert type(t['a']) is MyColumn
assert type(t['b']) is MyMaskedColumn # upgrade
assert type(t['c']) is MyMaskedColumn
assert type(t['d']) is MySubColumn
assert type(t['e']) is MySubMaskedColumn # sub-class not downgraded
def test_sort_with_mutable_skycoord():
"""Test sorting a table that has a mutable column such as SkyCoord.
In this case the sort is done in-place
"""
t = Table([[2, 1], SkyCoord([4, 3], [6, 5], unit='deg,deg')], names=['a', 'sc'])
meta = {'a': [1, 2]}
ta = t['a']
tsc = t['sc']
t['sc'].info.meta = meta
t.sort('a')
assert np.all(t['a'] == [1, 2])
assert np.allclose(t['sc'].ra.to_value(u.deg), [3, 4])
assert np.allclose(t['sc'].dec.to_value(u.deg), [5, 6])
assert t['a'] is ta
assert t['sc'] is tsc
# Prior to astropy 4.1 this was a deep copy of SkyCoord column; after 4.1
# it is a reference.
t['sc'].info.meta['a'][0] = 100
assert meta['a'][0] == 100
def test_sort_with_non_mutable():
"""Test sorting a table that has a non-mutable column.
"""
t = Table([[2, 1], [3, 4]], names=['a', 'b'])
ta = t['a']
tb = t['b']
t['b'].setflags(write=False)
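    # A read-only column cannot be sorted in place, so sort() must replace it with a copy.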
meta = {'a': [1, 2]}
t['b'].info.meta = meta
t.sort('a')
assert np.all(t['a'] == [1, 2])
assert np.all(t['b'] == [4, 3])
assert ta is t['a']
assert tb is not t['b']
    # Unlike the mutable SkyCoord case above, sort() replaced the non-writeable
    # column with a copy, so the original meta dict is not affected.
t['b'].info.meta['a'][0] = 100
assert meta['a'][0] == 1
def test_init_with_list_of_masked_arrays():
"""Test the fix for #8977"""
m0 = np.ma.array([0, 1, 2], mask=[True, False, True])
m1 = np.ma.array([3, 4, 5], mask=[False, True, False])
mc = [m0, m1]
# Test _init_from_list
t = table.Table([mc], names=['a'])
# Test add_column
t['b'] = [m1, m0]
assert t['a'].shape == (2, 3)
assert np.all(t['a'][0] == m0)
assert np.all(t['a'][1] == m1)
assert np.all(t['a'][0].mask == m0.mask)
assert np.all(t['a'][1].mask == m1.mask)
assert t['b'].shape == (2, 3)
assert np.all(t['b'][0] == m1)
assert np.all(t['b'][1] == m0)
assert np.all(t['b'][0].mask == m1.mask)
assert np.all(t['b'][1].mask == m0.mask)
def test_data_to_col_convert_strategy():
"""Test the update to how data_to_col works (#8972), using the regression
example from #8971.
"""
t = table.Table([[0, 1]])
t['a'] = 1
t['b'] = np.int64(2) # Failed previously
assert np.all(t['a'] == [1, 1])
assert np.all(t['b'] == [2, 2])
def test_structured_masked_column():
"""Test that adding a masked ndarray with a structured dtype works"""
dtype = np.dtype([('z', 'f8'), ('x', 'f8'), ('y', 'i4')])
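    # With a structured dtype the mask is given per field within each row.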
t = Table()
t['a'] = np.ma.array([(1, 2, 3),
(4, 5, 6)],
mask=[(False, False, True),
(False, True, False)],
dtype=dtype)
assert np.all(t['a']['z'].mask == [False, False])
assert np.all(t['a']['x'].mask == [False, True])
assert np.all(t['a']['y'].mask == [True, False])
assert isinstance(t['a'], MaskedColumn)
def test_rows_with_mixins():
"""Test for #9165 to allow adding a list of mixin objects.
Also test for fix to #9357 where group_by() failed due to
mixin object not having info.indices set to [].
"""
tm = Time([1, 2], format='cxcsec')
q = [1, 2] * u.m
mixed1 = [1 * u.m, 2] # Mixed input, fails to convert to Quantity
mixed2 = [2, 1 * u.m] # Mixed input, not detected as potential mixin
rows = [(1, q[0], tm[0]),
(2, q[1], tm[1])]
t = table.QTable(rows=rows)
t['a'] = [q[0], q[1]]
t['b'] = [tm[0], tm[1]]
t['m1'] = mixed1
t['m2'] = mixed2
assert np.all(t['col1'] == q)
assert np.all(t['col2'] == tm)
assert np.all(t['a'] == q)
assert np.all(t['b'] == tm)
    # Use the builtin all(): np.all() over a generator is always truthy.
    assert all(t['m1'][ii] == mixed1[ii] for ii in range(2))
    assert all(t['m2'][ii] == mixed2[ii] for ii in range(2))
assert type(t['m1']) is table.Column
assert t['m1'].dtype is np.dtype(object)
assert type(t['m2']) is table.Column
assert t['m2'].dtype is np.dtype(object)
# Ensure group_by() runs without failing for sortable columns.
# The columns 'm1', and 'm2' are object dtype and not sortable.
for name in ['col0', 'col1', 'col2', 'a', 'b']:
t.group_by(name)
# For good measure include exactly the failure in #9357 in which the
# list of Time() objects is in the Table initializer.
mjds = [Time(58000, format="mjd")]
t = Table([mjds, ["gbt"]], names=("mjd", "obs"))
t.group_by("obs")
def test_iterrows():
dat = [(1, 2, 3),
(4, 5, 6),
(7, 8, 6)]
t = table.Table(rows=dat, names=('a', 'b', 'c'))
c_s = []
a_s = []
for c, a in t.iterrows('c', 'a'):
a_s.append(a)
c_s.append(c)
assert np.all(t['a'] == a_s)
assert np.all(t['c'] == c_s)
rows = [row for row in t.iterrows()]
assert rows == dat
with pytest.raises(ValueError, match='d is not a valid column name'):
t.iterrows('d')
def test_values_and_types():
dat = [(1, 2, 3),
(4, 5, 6),
(7, 8, 6)]
t = table.Table(rows=dat, names=('a', 'b', 'c'))
assert isinstance(t.values(), type(OrderedDict().values()))
assert isinstance(t.columns.values(), type(OrderedDict().values()))
assert isinstance(t.columns.keys(), type(OrderedDict().keys()))
for i in t.values():
assert isinstance(i, table.column.Column)
def test_items():
dat = [(1, 2, 3),
(4, 5, 6),
(7, 8, 9)]
t = table.Table(rows=dat, names=('a', 'b', 'c'))
assert isinstance(t.items(), type(OrderedDict({}).items()))
for i in list(t.items()):
assert isinstance(i, tuple)
def test_read_write_not_replaceable():
t = table.Table()
with pytest.raises(AttributeError):
t.read = 'fake_read'
with pytest.raises(AttributeError):
t.write = 'fake_write'
def test_keep_columns_with_generator():
# Regression test for #12529
t = table.table_helpers.simple_table(1)
t.keep_columns(col for col in t.colnames if col == 'a')
assert t.colnames == ['a']
def test_remove_columns_with_generator():
# Regression test for #12529
t = table.table_helpers.simple_table(1)
t.remove_columns(col for col in t.colnames if col == 'a')
assert t.colnames == ['b', 'c']
def test_keep_columns_invalid_names_messages():
t = table.table_helpers.simple_table(1)
with pytest.raises(KeyError, match='column "d" does not exist'):
t.keep_columns(['c', 'd'])
with pytest.raises(KeyError,
                       match="columns {'[de]', '[de]'} do not exist"):
t.keep_columns(['c', 'd', 'e'])
def test_remove_columns_invalid_names_messages():
t = table.table_helpers.simple_table(1)
with pytest.raises(KeyError, match='column "d" does not exist'):
t.remove_columns(['c', 'd'])
with pytest.raises(KeyError,
                       match="columns {'[de]', '[de]'} do not exist"):
t.remove_columns(['c', 'd', 'e'])
@pytest.mark.parametrize("path_type", ['str', 'Path'])
def test_read_write_tilde_path(path_type, home_is_tmpdir):
if path_type == 'str':
test_file = os.path.join('~', 'test.csv')
else:
test_file = pathlib.Path('~', 'test.csv')
t1 = Table()
t1['a'] = [1, 2, 3]
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2['a'] == [1, 2, 3])
# Ensure the data wasn't written to the literal tilde-prefixed path
assert not os.path.exists(test_file)
|
e93854d462738c1a3cbefa2300f9a1fc2db8b559e4434a6e050579482dd3f914 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from io import StringIO
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import pytest
from astropy import units as u
from astropy import time
from astropy import coordinates
from astropy import table
from astropy.table.info import serialize_method_as
from astropy.utils.data_info import data_info_factory, dtype_info_name
from astropy.table.table_helpers import simple_table
def test_table_info_attributes(table_types):
"""
Test the info() method of printing a summary of table column attributes
"""
a = np.array([1, 2, 3], dtype='int32')
b = np.array([1, 2, 3], dtype='float32')
c = np.array(['a', 'c', 'e'], dtype='|S1')
t = table_types.Table([a, b, c], names=['a', 'b', 'c'])
# Minimal output for a typical table
tinfo = t.info(out=None)
subcls = ['class'] if table_types.Table.__name__ == 'MyTable' else []
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format',
'description', 'class', 'n_bad', 'length']
assert np.all(tinfo['name'] == ['a', 'b', 'c'])
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1')])
if subcls:
assert np.all(tinfo['class'] == ['MyColumn'] * 3)
# All output fields including a mixin column
t['d'] = [1, 2, 3] * u.m
t['d'].description = 'quantity'
t['a'].format = '%02d'
t['e'] = time.Time([1, 2, 3], format='mjd')
t['e'].info.description = 'time'
t['f'] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit='deg')
t['f'].info.description = 'skycoord'
tinfo = t.info(out=None)
assert np.all(tinfo['name'] == 'a b c d e f'.split())
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'float64',
'object', 'object'])
assert np.all(tinfo['unit'] == ['', '', '', 'm', '', 'deg,deg'])
assert np.all(tinfo['format'] == ['%02d', '', '', '', '', ''])
assert np.all(tinfo['description'] == ['', '', '', 'quantity', 'time', 'skycoord'])
cls = t.ColumnClass.__name__
assert np.all(tinfo['class'] == [cls, cls, cls, cls, 'Time', 'SkyCoord'])
# Test that repr(t.info) is same as t.info()
out = StringIO()
t.info(out=out)
assert repr(t.info) == out.getvalue()
def test_table_info_stats(table_types):
"""
Test the info() method of printing a summary of table column statistics
"""
a = np.array([1, 2, 1, 2], dtype='int32')
b = np.array([1, 2, 1, 2], dtype='float32')
c = np.array(['a', 'c', 'e', 'f'], dtype='|S1')
d = time.Time([1, 2, 1, 2], format='mjd')
t = table_types.Table([a, b, c, d], names=['a', 'b', 'c', 'd'])
# option = 'stats'
masked = 'masked=True ' if t.masked else ''
out = StringIO()
t.info('stats', out=out)
table_header_line = f'<{t.__class__.__name__} {masked}length=4>'
exp = [table_header_line,
'name mean std min max',
'---- ---- --- --- ---',
' a 1.5 0.5 1 2',
' b 1.5 0.5 1 2',
' c -- -- -- --',
' d -- -- 1.0 2.0']
assert out.getvalue().splitlines() == exp
# option = ['attributes', 'stats']
tinfo = t.info(['attributes', 'stats'], out=None)
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',
'class', 'mean', 'std', 'min', 'max', 'n_bad', 'length']
assert np.all(tinfo['mean'] == ['1.5', '1.5', '--', '--'])
assert np.all(tinfo['std'] == ['0.5', '0.5', '--', '--'])
assert np.all(tinfo['min'] == ['1', '1', '--', '1.0'])
assert np.all(tinfo['max'] == ['2', '2', '--', '2.0'])
out = StringIO()
t.info('stats', out=out)
exp = [table_header_line,
'name mean std min max',
'---- ---- --- --- ---',
' a 1.5 0.5 1 2',
' b 1.5 0.5 1 2',
' c -- -- -- --',
' d -- -- 1.0 2.0']
assert out.getvalue().splitlines() == exp
# option = ['attributes', custom]
custom = data_info_factory(names=['sum', 'first'],
funcs=[np.sum, lambda col: col[0]])
out = StringIO()
tinfo = t.info(['attributes', custom], out=None)
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',
'class', 'sum', 'first', 'n_bad', 'length']
assert np.all(tinfo['name'] == ['a', 'b', 'c', 'd'])
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'object'])
assert np.all(tinfo['sum'] == ['6', '6', '--', '--'])
assert np.all(tinfo['first'] == ['1', '1', 'a', '1.0'])
def test_data_info():
"""
Test getting info for just a column.
"""
cols = [table.Column([1.0, 2.0, np.nan], name='name',
description='description', unit='m/s'),
table.MaskedColumn([1.0, 2.0, 3.0], name='name',
description='description', unit='m/s',
mask=[False, False, True])]
for c in cols:
# Test getting the full ordered dict
cinfo = c.info(out=None)
assert cinfo == OrderedDict([('name', 'name'),
('dtype', 'float64'),
('shape', ''),
('unit', 'm / s'),
('format', ''),
('description', 'description'),
('class', type(c).__name__),
('n_bad', 1),
('length', 3)])
# Test the console (string) version which omits trivial values
out = StringIO()
c.info(out=out)
exp = ['name = name',
'dtype = float64',
'unit = m / s',
'description = description',
f'class = {type(c).__name__}',
'n_bad = 1',
'length = 3']
assert out.getvalue().splitlines() == exp
# repr(c.info) gives the same as c.info()
assert repr(c.info) == out.getvalue()
# Test stats info
cinfo = c.info('stats', out=None)
assert cinfo == OrderedDict([('name', 'name'),
('mean', '1.5'),
('std', '0.5'),
('min', '1'),
('max', '2'),
('n_bad', 1),
('length', 3)])
def test_data_info_subclass():
class Column(table.Column):
"""
Confusingly named Column on purpose, but that is legal.
"""
pass
for data in ([], [1, 2]):
c = Column(data, dtype='int64')
cinfo = c.info(out=None)
assert cinfo == OrderedDict([('dtype', 'int64'),
('shape', ''),
('unit', ''),
('format', ''),
('description', ''),
('class', 'Column'),
('n_bad', 0),
('length', len(data))])
def test_scalar_info():
"""
Make sure info works with scalar values
"""
c = time.Time('2000:001')
cinfo = c.info(out=None)
assert cinfo['n_bad'] == 0
assert 'length' not in cinfo
def test_empty_table():
t = table.Table()
out = StringIO()
t.info(out=out)
exp = ['<Table length=0>', '<No columns>']
assert out.getvalue().splitlines() == exp
def test_class_attribute():
"""
Test that class info column is suppressed only for identical non-mixin
columns.
"""
vals = [[1] * u.m, [2] * u.m]
texp = ['<Table length=1>',
'name dtype unit',
'---- ------- ----',
'col0 float64 m',
'col1 float64 m']
qexp = ['<QTable length=1>',
'name dtype unit class ',
'---- ------- ---- --------',
'col0 float64 m Quantity',
'col1 float64 m Quantity']
for table_cls, exp in ((table.Table, texp),
(table.QTable, qexp)):
t = table_cls(vals)
out = StringIO()
t.info(out=out)
assert out.getvalue().splitlines() == exp
def test_ignore_warnings():
t = table.Table([[np.nan, np.nan]])
with warnings.catch_warnings(record=True) as warns:
t.info('stats', out=None)
assert len(warns) == 0
def test_no_deprecation_warning():
# regression test for #5459, where numpy deprecation warnings were
# emitted unnecessarily.
t = simple_table()
with warnings.catch_warnings(record=True) as warns:
t.info()
assert len(warns) == 0
def test_lost_parent_error():
c = table.Column([1, 2, 3], name='a')
with pytest.raises(AttributeError,
match='failed to access "info" attribute'):
c[:].info.name
def test_info_serialize_method():
"""
Unit test of context manager to set info.serialize_method. Normally just
used to set this for writing a Table to file (FITS, ECSV, HDF5).
"""
t = table.Table({'tm': time.Time([1, 2], format='cxcsec'),
'sc': coordinates.SkyCoord([1, 2], [1, 2], unit='deg'),
'mc': table.MaskedColumn([1, 2], mask=[True, False]),
'mc2': table.MaskedColumn([1, 2], mask=[True, False])}
)
origs = {}
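    # Save the original serialize_method dicts to verify they are restored afterward.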
for name in ('tm', 'mc', 'mc2'):
origs[name] = deepcopy(t[name].info.serialize_method)
# Test setting by name and getting back to originals
with serialize_method_as(t, {'tm': 'test_tm', 'mc': 'test_mc'}):
for name in ('tm', 'mc'):
assert all(t[name].info.serialize_method[key] == 'test_' + name
for key in t[name].info.serialize_method)
assert t['mc2'].info.serialize_method == origs['mc2']
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
# Test setting by name and class, where name takes precedence. Also
# test that it works for subclasses.
with serialize_method_as(t, {'tm': 'test_tm', 'mc': 'test_mc',
table.Column: 'test_mc2'}):
for name in ('tm', 'mc', 'mc2'):
assert all(t[name].info.serialize_method[key] == 'test_' + name
for key in t[name].info.serialize_method)
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
# Test supplying a single string that all applies to all columns with
# a serialize_method.
with serialize_method_as(t, 'test'):
for name in ('tm', 'mc', 'mc2'):
assert all(t[name].info.serialize_method[key] == 'test'
for key in t[name].info.serialize_method)
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
def test_info_serialize_method_exception():
"""
    Test that serialize_method settings are restored to their original values
    even when an exception is raised inside the serialize_method_as context.
"""
t = simple_table(masked=True)
origs = deepcopy(t['a'].info.serialize_method)
try:
with serialize_method_as(t, 'test'):
assert all(t['a'].info.serialize_method[key] == 'test'
for key in t['a'].info.serialize_method)
raise ZeroDivisionError()
except ZeroDivisionError:
pass
assert t['a'].info.serialize_method == origs # dict compare
|
49d9ab7d9f346563f4ac21bcb23d0c5747d9b5d871f746807ab965454ae061c3 | from os.path import abspath, dirname, join
import textwrap
import pytest
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy.table.table import Table
from astropy import extern
from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_IPYTHON # noqa
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
EXTERN_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data'))
JQUERY_MIN_JS = 'jquery-3.6.0.min.js'
REFERENCE = """
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
<style>
body {font-family: sans-serif;}
table.dataTable {width: auto !important; margin: 0 !important;}
.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em}
</style>
<link href="%(datatables_css_url)s" rel="stylesheet" type="text/css"/>
<script src="%(jquery_url)s">
</script>
<script src="%(datatables_js_url)s">
</script>
</head>
<body>
<script>
var astropy_sort_num = function(a, b) {
var a_num = parseFloat(a);
var b_num = parseFloat(b);
if (isNaN(a_num) && isNaN(b_num))
return ((a < b) ? -1 : ((a > b) ? 1 : 0));
else if (!isNaN(a_num) && !isNaN(b_num))
return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0));
else
return isNaN(a_num) ? -1 : 1;
}
jQuery.extend( jQuery.fn.dataTableExt.oSort, {
"optionalnum-asc": astropy_sort_num,
"optionalnum-desc": function (a,b) { return -astropy_sort_num(a, b); }
});
$(document).ready(function() {
$('#%(table_id)s').dataTable({
order: [],
pageLength: %(length)s,
lengthMenu: [[%(display_length)s, -1], [%(display_length)s, 'All']],
pagingType: "full_numbers",
columnDefs: [{targets: [0], type: "optionalnum"}]
});
} ); </script>
<table class="%(table_class)s" id="%(table_id)s">
<thead>
<tr>
<th>a</th>
<th>b</th>
</tr>
</thead>
%(lines)s
</table>
</body>
</html>
"""
TPL = (' <tr>\n'
' <td>{0}</td>\n'
' <td>{1}</td>\n'
' </tr>')
def format_lines(col1, col2):
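    """Render the values of two columns as HTML table rows using the TPL template."""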
col1_format = getattr(col1.info, 'default_format', lambda x: x)
col2_format = getattr(col2.info, 'default_format', lambda x: x)
return '\n'.join(TPL.format(col1_format(v1), col2_format(v2))
for v1, v2 in zip(col1, col2))
def test_write_jsviewer_default(tmpdir):
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['a', 'b', 'c', 'd', 'e']
t['a'].unit = 'm'
tmpfile = tmpdir.join('test.html').strpath
t.write(tmpfile, format='jsviewer')
ref = REFERENCE % dict(
lines=format_lines(t['a'], t['b']),
table_class='display compact',
table_id=f'table{id(t)}',
length='50',
display_length='10, 25, 50, 100, 500, 1000',
datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',
datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
jquery_url='https://code.jquery.com/' + JQUERY_MIN_JS
)
with open(tmpfile) as f:
assert f.read().strip() == ref.strip()
def test_write_jsviewer_overwrite(tmpdir):
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['a', 'b', 'c', 'd', 'e']
t['a'].unit = 'm'
tmpfile = tmpdir.join('test.html').strpath
# normal write
t.write(tmpfile, format='jsviewer')
# errors on overwrite
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(tmpfile, format='jsviewer')
# unless specified
t.write(tmpfile, format='jsviewer', overwrite=True)
@pytest.mark.parametrize('mixin', [
Time(['J2000', 'J2001']),
Time([50000., 50001.0001], format='mjd'),
SkyCoord(ra=[100., 110.], dec=[-10., 10.], unit='deg')])
def test_write_jsviewer_mixin(tmpdir, mixin):
t = Table()
t['a'] = [1, 2]
t['b'] = mixin
t['a'].unit = 'm'
tmpfile = tmpdir.join('test.html').strpath
t.write(tmpfile, format='jsviewer')
ref = REFERENCE % dict(
lines=format_lines(t['a'], t['b']),
table_class='display compact',
table_id=f'table{id(t)}',
length='50',
display_length='10, 25, 50, 100, 500, 1000',
datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',
datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
jquery_url='https://code.jquery.com/' + JQUERY_MIN_JS
)
with open(tmpfile) as f:
assert f.read().strip() == ref.strip()
@pytest.mark.skipif('not HAS_BLEACH')
def test_write_jsviewer_options(tmpdir):
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['<b>a</b>', 'b', 'c', 'd', 'e']
t['a'].unit = 'm'
tmpfile = tmpdir.join('test.html').strpath
t.write(tmpfile, format='jsviewer', table_id='test', max_lines=3,
jskwargs={'display_length': 5}, table_class='display hover',
htmldict=dict(raw_html_cols='b'))
ref = REFERENCE % dict(
lines=format_lines(t['a'][:3], t['b'][:3]),
table_class='display hover',
table_id='test',
length='5',
display_length='5, 10, 25, 50, 100, 500, 1000',
datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',
datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
jquery_url='https://code.jquery.com/' + JQUERY_MIN_JS
)
with open(tmpfile) as f:
assert f.read().strip() == ref.strip()
def test_write_jsviewer_local(tmpdir):
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['a', 'b', 'c', 'd', 'e']
t['a'].unit = 'm'
tmpfile = tmpdir.join('test.html').strpath
t.write(tmpfile, format='jsviewer', table_id='test',
jskwargs={'use_local_files': True})
ref = REFERENCE % dict(
lines=format_lines(t['a'], t['b']),
table_class='display compact',
table_id='test',
length='50',
display_length='10, 25, 50, 100, 500, 1000',
datatables_css_url='file://' + join(EXTERN_DIR, 'css', 'jquery.dataTables.css'),
datatables_js_url='file://' + join(EXTERN_DIR, 'js', 'jquery.dataTables.min.js'),
jquery_url='file://' + join(EXTERN_DIR, 'js', JQUERY_MIN_JS)
)
with open(tmpfile) as f:
assert f.read().strip() == ref.strip()
@pytest.mark.skipif('not HAS_IPYTHON')
def test_show_in_notebook():
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['b', 'c', 'a', 'd', 'e']
htmlstr_windx = t.show_in_notebook().data # should default to 'idx'
htmlstr_windx_named = t.show_in_notebook(show_row_index='realidx').data
htmlstr_woindx = t.show_in_notebook(show_row_index=False).data
assert (textwrap.dedent("""
<thead><tr><th>idx</th><th>a</th><th>b</th></tr></thead>
<tr><td>0</td><td>1</td><td>b</td></tr>
<tr><td>1</td><td>2</td><td>c</td></tr>
<tr><td>2</td><td>3</td><td>a</td></tr>
<tr><td>3</td><td>4</td><td>d</td></tr>
<tr><td>4</td><td>5</td><td>e</td></tr>
""").strip() in htmlstr_windx)
assert '<thead><tr><th>realidx</th><th>a</th><th>b</th></tr></thead>' in htmlstr_windx_named
assert '<thead><tr><th>a</th><th>b</th></tr></thead>' in htmlstr_woindx
|
24c58c555eafd5e877d150a7e2c3d9bd7e1a571f76941855bdd15f7952287594 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict, UserDict
from collections.abc import Mapping
import pytest
import numpy as np
from astropy.table import Column, TableColumns, Table, MaskedColumn
import astropy.units as u
class DictLike(Mapping):
"""A minimal mapping-like object that does not subclass dict.
This is used to test code that expects dict-like but without actually
inheriting from dict.
"""
def __init__(self, *args, **kwargs):
self._data = dict(*args, **kwargs)
def __getitem__(self, item):
return self._data[item]
def __setitem__(self, item, value):
self._data[item] = value
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
class TestTableColumnsInit():
def test_init(self):
"""Test initialisation with lists, tuples, dicts of arrays
rather than Columns [regression test for #2647]"""
x1 = np.arange(10.)
x2 = np.arange(5.)
x3 = np.arange(7.)
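        # Deliberately different lengths: TableColumns, unlike Table, does not
        # enforce matching column lengths.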
col_list = [('x1', x1), ('x2', x2), ('x3', x3)]
tc_list = TableColumns(col_list)
for col in col_list:
assert col[0] in tc_list
assert tc_list[col[0]] is col[1]
col_tuple = (('x1', x1), ('x2', x2), ('x3', x3))
tc_tuple = TableColumns(col_tuple)
for col in col_tuple:
assert col[0] in tc_tuple
assert tc_tuple[col[0]] is col[1]
col_dict = dict([('x1', x1), ('x2', x2), ('x3', x3)])
tc_dict = TableColumns(col_dict)
for col in tc_dict.keys():
assert col in tc_dict
assert tc_dict[col] is col_dict[col]
columns = [Column(col[1], name=col[0]) for col in col_list]
tc = TableColumns(columns)
for col in columns:
assert col.name in tc
assert tc[col.name] is col
# pytest.mark.usefixtures('table_type')
class BaseInitFrom():
    def _setup(self, table_type):
        """No-op hook: subclasses build self.data here or in setup_method."""
def test_basic_init(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=('a', 'b', 'c'))
assert t.colnames == ['a', 'b', 'c']
assert np.all(t['a'] == np.array([1, 3]))
assert np.all(t['b'] == np.array([2, 4]))
assert np.all(t['c'] == np.array([3, 5]))
assert all(t[name].name == name for name in t.colnames)
def test_set_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=('a', 'b', 'c'), dtype=('i4', 'f4', 'f8'))
assert t.colnames == ['a', 'b', 'c']
assert np.all(t['a'] == np.array([1, 3], dtype='i4'))
assert np.all(t['b'] == np.array([2, 4], dtype='f4'))
assert np.all(t['c'] == np.array([3, 5], dtype='f8'))
assert t['a'].dtype.type == np.int32
assert t['b'].dtype.type == np.float32
assert t['c'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_names_dtype_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=('a',), dtype=('i4', 'f4', 'i4'))
def test_names_cols_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=('a',), dtype=('i4'))
@pytest.mark.usefixtures('table_type')
class BaseInitFromListLike(BaseInitFrom):
def test_names_cols_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=['a'], dtype=[int])
def test_names_copy_false(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=['a'], dtype=[int], copy=False)
@pytest.mark.usefixtures('table_type')
class BaseInitFromDictLike(BaseInitFrom):
pass
@pytest.mark.usefixtures('table_type')
class TestInitFromNdarrayHomo(BaseInitFromListLike):
def setup_method(self, method):
self.data = np.array([(1, 2, 3),
(3, 4, 5)],
dtype='i4')
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ['col0', 'col1', 'col2']
def test_ndarray_ref(self, table_type):
"""Init with ndarray and copy=False and show that this is a reference
to input ndarray"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t['col1'][1] = 0
assert t.as_array()['col1'][1] == 0
assert t['col1'][1] == 0
assert self.data[1][1] == 0
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['a', None, 'c'], dtype=[None, None, 'f8'])
assert t.colnames == ['a', 'col1', 'c']
assert t['a'].dtype.type == np.int32
assert t['col1'].dtype.type == np.int32
assert t['c'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['a', None, 'c'])
assert t.colnames == ['a', 'col1', 'c']
assert t['a'].dtype.type == np.int32
assert t['col1'].dtype.type == np.int32
assert t['c'].dtype.type == np.int32
assert all(t[name].name == name for name in t.colnames)
@pytest.mark.usefixtures('table_type')
class TestInitFromListOfLists(BaseInitFromListLike):
    def setup_method(self, method):
self.data = [(np.int32(1), np.int32(3)),
Column(name='col1', data=[2, 4], dtype=np.int32),
np.array([3, 5], dtype=np.int32)]
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ['col0', 'col1', 'col2']
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['b', None, 'c'],
dtype=['f4', None, 'f8'])
assert t.colnames == ['b', 'col1', 'c']
assert t['b'].dtype.type == np.float32
assert t['col1'].dtype.type == np.int32
assert t['c'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_bad_data(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type([[1, 2],
[3, 4, 5]])
@pytest.mark.usefixtures('table_type')
class TestInitFromListOfDicts(BaseInitFromListLike):
def _setup(self, table_type):
self.data = [{'a': 1, 'b': 2, 'c': 3},
{'a': 3, 'b': 4, 'c': 5}]
self.data_ragged = [{'a': 1, 'b': 2},
{'a': 2, 'c': 4}]
def test_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert all(colname in {'a', 'b', 'c'} for colname in t.colnames)
def test_names_ordered(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=('c', 'b', 'a'))
assert t.colnames == ['c', 'b', 'a']
def test_missing_data_init_from_dict(self, table_type):
self._setup(table_type)
dat = self.data_ragged
for rows in [False, True]:
t = table_type(rows=dat) if rows else table_type(dat)
assert np.all(t['a'] == [1, 2])
assert np.all(t['b'].mask == [False, True])
assert np.all(t['b'].data == [2, 2])
assert np.all(t['c'].mask == [True, False])
assert np.all(t['c'].data == [4, 4])
assert type(t['a']) is (MaskedColumn if t.masked else Column)
assert type(t['b']) is MaskedColumn
assert type(t['c']) is MaskedColumn
class TestInitFromListOfMapping(TestInitFromListOfDicts):
"""Test that init from a Mapping that is not a dict subclass works"""
def _setup(self, table_type):
self.data = [DictLike(a=1, b=2, c=3),
DictLike(a=3, b=4, c=5)]
self.data_ragged = [DictLike(a=1, b=2),
DictLike(a=2, c=4)]
# Make sure data rows are not a dict subclass
assert not isinstance(self.data[0], dict)
@pytest.mark.usefixtures('table_type')
class TestInitFromColsList(BaseInitFromListLike):
def _setup(self, table_type):
self.data = [Column([1, 3], name='x', dtype=np.int32),
np.array([2, 4], dtype=np.int32),
np.array([3, 5], dtype='i8')]
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ['x', 'col1', 'col2']
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['b', None, 'c'], dtype=['f4', None, 'f8'])
assert t.colnames == ['b', 'col1', 'c']
assert t['b'].dtype.type == np.float32
assert t['col1'].dtype.type == np.int32
assert t['c'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_ref(self, table_type):
"""Test that initializing from a list of columns can be done by reference"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t['x'][0] = 100
assert self.data[0][0] == 100
@pytest.mark.usefixtures('table_type')
class TestInitFromNdarrayStruct(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = np.array([(1, 2, 3),
(3, 4, 5)],
dtype=[('x', 'i8'), ('y', 'i4'), ('z', 'i8')])
def test_ndarray_ref(self, table_type):
"""Init with ndarray and copy=False and show that table uses reference
to input ndarray"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t['x'][1] = 0 # Column-wise assignment
t[0]['y'] = 0 # Row-wise assignment
assert self.data['x'][1] == 0
assert self.data['y'][0] == 0
assert np.all(np.array(t) == self.data)
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'f8'])
assert t.colnames == ['e', 'y', 'd']
assert t['e'].dtype.type == np.float32
assert t['y'].dtype.type == np.int32
assert t['d'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['e', None, 'd'], copy=False)
assert t.colnames == ['e', 'y', 'd']
assert t['e'].dtype.type == np.int64
assert t['y'].dtype.type == np.int32
assert t['d'].dtype.type == np.int64
assert all(t[name].name == name for name in t.colnames)
@pytest.mark.usefixtures('table_type')
class TestInitFromDict(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = dict([('a', Column([1, 3], name='x')),
('b', [2, 4]),
('c', np.array([3, 5], dtype='i8'))])
@pytest.mark.usefixtures('table_type')
class TestInitFromMapping(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = UserDict([('a', Column([1, 3], name='x')),
('b', [2, 4]),
('c', np.array([3, 5], dtype='i8'))])
assert isinstance(self.data, Mapping)
assert not isinstance(self.data, dict)
@pytest.mark.usefixtures('table_type')
class TestInitFromOrderedDict(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = OrderedDict([('a', Column(name='x', data=[1, 3])),
('b', [2, 4]),
('c', np.array([3, 5], dtype='i8'))])
def test_col_order(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ['a', 'b', 'c']
@pytest.mark.usefixtures('table_type')
class TestInitFromRow(BaseInitFromDictLike):
def _setup(self, table_type):
arr = np.array([(1, 2, 3),
(3, 4, 5)],
dtype=[('x', 'i8'), ('y', 'i8'), ('z', 'f8')])
self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']})
def test_init_from_row(self, table_type):
self._setup(table_type)
t = table_type(self.data[0])
# Values and meta match original
assert t.meta['comments'][0] == 'comment1'
for name in t.colnames:
assert np.all(t[name] == self.data[name][0:1])
assert all(t[name].name == name for name in t.colnames)
# Change value in new instance and check that original is the same
t['x'][0] = 8
t.meta['comments'][1] = 'new comment2'
assert np.all(t['x'] == np.array([8]))
assert np.all(self.data['x'] == np.array([1, 3]))
assert self.data.meta['comments'][1] == 'comment2'
@pytest.mark.usefixtures('table_type')
class TestInitFromTable(BaseInitFromDictLike):
def _setup(self, table_type):
arr = np.array([(1, 2, 3),
(3, 4, 5)],
dtype=[('x', 'i8'), ('y', 'i8'), ('z', 'f8')])
self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']})
def test_data_meta_copy(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.meta['comments'][0] == 'comment1'
t['x'][1] = 8
t.meta['comments'][1] = 'new comment2'
assert self.data.meta['comments'][1] == 'comment2'
assert np.all(t['x'] == np.array([1, 8]))
assert np.all(self.data['x'] == np.array([1, 3]))
assert t['z'].name == 'z'
assert all(t[name].name == name for name in t.colnames)
def test_table_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, copy=False)
t['x'][1] = 0
assert t['x'][1] == 0
assert self.data['x'][1] == 0
assert np.all(t.as_array() == self.data.as_array())
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'i8'])
assert t.colnames == ['e', 'y', 'd']
assert t['e'].dtype.type == np.float32
assert t['y'].dtype.type == np.int64
assert t['d'].dtype.type == np.int64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['e', None, 'd'], copy=False)
assert t.colnames == ['e', 'y', 'd']
assert t['e'].dtype.type == np.int64
assert t['y'].dtype.type == np.int64
assert t['d'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_init_from_columns(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type(t.columns['z', 'x', 'y'])
assert t2.colnames == ['z', 'x', 'y']
assert t2.dtype.names == ('z', 'x', 'y')
def test_init_from_columns_slice(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type(t.columns[0:2])
assert t2.colnames == ['x', 'y']
assert t2.dtype.names == ('x', 'y')
def test_init_from_columns_mix(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type([t.columns[0], t.columns['z']])
assert t2.colnames == ['x', 'z']
assert t2.dtype.names == ('x', 'z')
@pytest.mark.usefixtures('table_type')
class TestInitFromNone():
# Note table_table.TestEmptyData tests initializing a completely empty
# table and adding data.
def test_data_none_with_cols(self, table_type):
"""
Test different ways of initing an empty table
"""
np_t = np.empty(0, dtype=[('a', 'f4', (2,)),
('b', 'i4')])
for kwargs in ({'names': ('a', 'b')},
{'names': ('a', 'b'), 'dtype': (('f4', (2,)), 'i4')},
{'dtype': [('a', 'f4', (2,)), ('b', 'i4')]},
{'dtype': np_t.dtype}):
t = table_type(**kwargs)
assert t.colnames == ['a', 'b']
assert len(t['a']) == 0
assert len(t['b']) == 0
if 'dtype' in kwargs:
assert t['a'].dtype.type == np.float32
assert t['b'].dtype.type == np.int32
assert t['a'].shape[1:] == (2,)
@pytest.mark.usefixtures('table_type')
class TestInitFromRows():
def test_init_with_rows(self, table_type):
for rows in ([[1, 'a'], [2, 'b']],
[(1, 'a'), (2, 'b')],
((1, 'a'), (2, 'b'))):
t = table_type(rows=rows, names=('a', 'b'))
assert np.all(t['a'] == [1, 2])
assert np.all(t['b'] == ['a', 'b'])
assert t.colnames == ['a', 'b']
assert t['a'].dtype.kind == 'i'
assert t['b'].dtype.kind in ('S', 'U')
# Regression test for
# https://github.com/astropy/astropy/issues/3052
assert t['b'].dtype.str.endswith('1')
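        # rows may also be a plain 2-D ndarray: each array row becomes one
        # table row and the dtype list applies column by column.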
rows = np.arange(6).reshape(2, 3)
t = table_type(rows=rows, names=('a', 'b', 'c'), dtype=['f8', 'f4', 'i8'])
assert np.all(t['a'] == [0, 3])
assert np.all(t['b'] == [1, 4])
assert np.all(t['c'] == [2, 5])
assert t.colnames == ['a', 'b', 'c']
assert t['a'].dtype.str.endswith('f8')
assert t['b'].dtype.str.endswith('f4')
assert t['c'].dtype.str.endswith('i8')
def test_init_with_rows_and_data(self, table_type):
with pytest.raises(ValueError) as err:
table_type(data=[[1]], rows=[[1]])
assert "Cannot supply both `data` and `rows` values" in str(err.value)
@pytest.mark.parametrize('has_data', [True, False])
def test_init_table_with_names_and_structured_dtype(has_data):
"""Test fix for #10393"""
arr = np.ones(2, dtype=np.dtype([('a', 'i4'), ('b', 'f4')]))
data_args = [arr] if has_data else []
t = Table(*data_args, names=['x', 'y'], dtype=arr.dtype)
assert t.colnames == ['x', 'y']
assert str(t['x'].dtype) == 'int32'
assert str(t['y'].dtype) == 'float32'
assert len(t) == (2 if has_data else 0)
@pytest.mark.usefixtures('table_type')
def test_init_and_ref_from_multidim_ndarray(table_type):
"""
Test that initializing from an ndarray structured array with
a multi-dim column works for both copy=False and True and that
the referencing is as expected.
"""
for copy in (False, True):
nd = np.array([(1, [10, 20]),
(3, [30, 40])],
dtype=[('a', 'i8'), ('b', 'i8', (2,))])
t = table_type(nd, copy=copy)
assert t.colnames == ['a', 'b']
assert t['a'].shape == (2,)
assert t['b'].shape == (2, 2)
t['a'][0] = -200
t['b'][1][1] = -100
if copy:
assert nd['a'][0] == 1
assert nd['b'][1][1] == 40
else:
assert nd['a'][0] == -200
assert nd['b'][1][1] == -100
@pytest.mark.usefixtures('table_type')
@pytest.mark.parametrize('copy', [False, True])
def test_init_and_ref_from_dict(table_type, copy):
"""
Test that initializing from a dict works for both copy=False and True and that
the referencing is as expected.
"""
x1 = np.arange(10.)
x2 = np.zeros(10)
col_dict = dict([('x1', x1), ('x2', x2)])
t = table_type(col_dict, copy=copy)
assert set(t.colnames) == {'x1', 'x2'}
assert t['x1'].shape == (10,)
assert t['x2'].shape == (10,)
t['x1'][0] = -200
t['x2'][1] = -100
if copy:
assert x1[0] == 0.
assert x2[1] == 0.
else:
assert x1[0] == -200
assert x2[1] == -100
def test_add_none_object_column():
"""Test fix for a problem introduced in #10636 (see
https://github.com/astropy/astropy/pull/10636#issuecomment-676847515)
"""
t = Table(data={'a': [1, 2, 3]})
t['b'] = None
assert all(val is None for val in t['b'])
assert t['b'].dtype.kind == 'O'
@pytest.mark.usefixtures('table_type')
def test_init_from_row_OrderedDict(table_type):
row1 = OrderedDict([('b', 1), ('a', 0)])
row2 = {'a': 10, 'b': 20}
rows12 = [row1, row2]
row3 = dict([('b', 1), ('a', 0)])
row4 = dict([('b', 11), ('a', 10)])
rows34 = [row3, row4]
t1 = table_type(rows=rows12)
t2 = table_type(rows=rows34)
t3 = t2[sorted(t2.colnames)]
assert t1.colnames == ['b', 'a']
assert t2.colnames == ['b', 'a']
assert t3.colnames == ['a', 'b']
def test_init_from_rows_as_generator():
rows = ((1 + ii, 2 + ii) for ii in range(2))
t = Table(rows=rows)
assert np.all(t['col0'] == [1, 2])
assert np.all(t['col1'] == [2, 3])
@pytest.mark.parametrize('dtype', ['fail', 'i4'])
def test_init_bad_dtype_in_empty_table(dtype):
with pytest.raises(ValueError,
match='type was specified but could not be parsed for column names'):
Table(dtype=dtype)
def test_init_data_type_not_allowed_to_init_table():
with pytest.raises(ValueError,
match="Data type <class 'str'> not allowed to init Table"):
Table('hello')
def test_init_Table_from_list_of_quantity():
"""Test fix for #11327"""
# Variation on original example in #11327 at the Table level
data = [{'x': 5 * u.m, 'y': 1 * u.m}, {'x': 10 * u.m, 'y': 3}]
t = Table(data)
assert t['x'].unit is u.m
assert t['y'].unit is None
assert t['x'].dtype.kind == 'f'
assert t['y'].dtype.kind == 'O'
assert np.all(t['x'] == [5, 10])
assert t['y'][0] == 1 * u.m
assert t['y'][1] == 3
|
20c9f57c2eb6741c22842bf5418cc1eca996f04436a249c8d1e37c0a28bb84f2 | import os
import re
import numpy as np
import pytest
from astropy.table.scripts import showtable
ROOT = os.path.abspath(os.path.dirname(__file__))
ASCII_ROOT = os.path.join(ROOT, '..', '..', 'io', 'ascii', 'tests')
FITS_ROOT = os.path.join(ROOT, '..', '..', 'io', 'fits', 'tests')
VOTABLE_ROOT = os.path.join(ROOT, '..', '..', 'io', 'votable', 'tests')
def test_missing_file(capsys):
showtable.main(['foobar.fits'])
out, err = capsys.readouterr()
assert err.startswith("ERROR: [Errno 2] No such file or directory: "
"'foobar.fits'")
def test_info(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--info'])
out, err = capsys.readouterr()
assert out.splitlines() == ['<Table length=3>',
' name dtype ',
'------ -------',
'target bytes20',
' V_mag float32']
def test_stats(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--stats'])
out, err = capsys.readouterr()
expected = ['<Table length=3>',
' name mean std min max ',
'------ ------- ------- ---- ----',
'target -- -- -- --',
' V_mag 12.866[0-9]? 1.72111 11.1 15.2']
out = out.splitlines()
assert out[:4] == expected[:4]
# Here we use re.match as in some cases one of the values above is
# platform-dependent.
assert re.match(expected[4], out[4]) is not None
def test_fits(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits')])
out, err = capsys.readouterr()
assert out.splitlines() == [' target V_mag',
'------- -----',
'NGC1001 11.1',
'NGC1002 12.3',
'NGC1003 15.2']
def test_fits_hdu(capsys):
from astropy.units import UnitsWarning
with pytest.warns(UnitsWarning):
showtable.main([
os.path.join(FITS_ROOT, 'data/zerowidth.fits'),
'--hdu', 'AIPS OF',
])
out, err = capsys.readouterr()
assert out.startswith(
' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n'
' DAYS \n'
'---------- --------- ----------- -------- ------- -------- --------\n'
'0.14438657 1 10 1 1 4 4\n')
def test_csv(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/simple_csv.csv')])
out, err = capsys.readouterr()
assert out.splitlines() == [' a b c ',
'--- --- ---',
' 1 2 3',
' 4 5 6']
def test_ascii_format(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/commented_header.dat'),
'--format', 'ascii.commented_header'])
out, err = capsys.readouterr()
assert out.splitlines() == [' a b c ',
'--- --- ---',
' 1 2 3',
' 4 5 6']
def test_ascii_delimiter(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/simple2.txt'),
'--format', 'ascii', '--delimiter', '|'])
out, err = capsys.readouterr()
assert out.splitlines() == [
"obsid redshift X Y object rad ",
"----- -------- ---- ---- ----------- ----",
" 3102 0.32 4167 4085 Q1250+568-A 9.0",
" 3102 0.32 4706 3916 Q1250+568-B 14.0",
" 877 0.22 4378 3892 'Source 82' 12.5",
]
def test_votable(capsys):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
showtable.main([os.path.join(VOTABLE_ROOT, 'data/regression.xml'),
'--table-id', 'main_table', '--max-width', '50'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' string_test string_test_2 ... bitarray2 ',
'----------------- ------------- ... -------------',
' String & test Fixed stri ... True .. False',
'String & test 0123456789 ... -- .. --',
' XXXX XXXX ... -- .. --',
' ... -- .. --',
' ... -- .. --']
def test_max_lines(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/cds2.dat'),
'--format', 'ascii.cds', '--max-lines', '7',
'--max-width', '30'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' SST ... Note',
' ... ',
'--------------- ... ----',
'041314.1+281910 ... --',
' ... ... ...',
'044427.1+251216 ... --',
'044642.6+245903 ... --',
'Length = 215 rows',
]
def test_show_dtype(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'),
'--show-dtype'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' target V_mag ',
'bytes20 float32',
'------- -------',
'NGC1001 11.1',
'NGC1002 12.3',
'NGC1003 15.2',
]
def test_hide_unit(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/cds.dat'),
'--format', 'ascii.cds'])
out, err = capsys.readouterr()
assert out.splitlines() == [
'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ',
' h min s deg arcmin arcsec mag GMsun',
'----- --- --- ----- --- --- ------ ------ ----- ----- --- -----',
' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35',
]
showtable.main([os.path.join(ASCII_ROOT, 'data/cds.dat'),
'--format', 'ascii.cds', '--hide-unit'])
out, err = capsys.readouterr()
assert out.splitlines() == [
'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ',
'----- --- --- ----- --- --- --- --- ----- ----- --- ----',
' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35',
]
|
53a02884d2a66520a099cb88256d52de64e52392aa95320054f5fd01e56b00ae | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import table
from astropy.table import pprint
class MyRow(table.Row):
def __str__(self):
return str(self.as_void())
class MyColumn(table.Column):
pass
class MyMaskedColumn(table.MaskedColumn):
pass
class MyTableColumns(table.TableColumns):
pass
class MyTableFormatter(pprint.TableFormatter):
pass
class MyTable(table.Table):
Row = MyRow
Column = MyColumn
MaskedColumn = MyMaskedColumn
TableColumns = MyTableColumns
TableFormatter = MyTableFormatter
def test_simple_subclass():
t = MyTable([[1, 2], [3, 4]])
row = t[0]
assert isinstance(row, MyRow)
assert isinstance(t['col0'], MyColumn)
assert isinstance(t.columns, MyTableColumns)
assert isinstance(t.formatter, MyTableFormatter)
t2 = MyTable(t)
row = t2[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
t3 = table.Table(t)
row = t3[0]
assert not isinstance(row, MyRow)
assert str(row) != '(1, 3)'
t = MyTable([[1, 2], [3, 4]], masked=True)
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert isinstance(t['col0'], MyMaskedColumn)
assert isinstance(t.formatter, MyTableFormatter)
class ParamsRow(table.Row):
"""
Row class that allows access to an arbitrary dict of parameters
stored as a dict object in the ``params`` column.
"""
def __getitem__(self, item):
if item not in self.colnames:
return super().__getitem__('params')[item]
else:
return super().__getitem__(item)
def keys(self):
out = [name for name in self.colnames if name != 'params']
params = [key.lower() for key in sorted(self['params'])]
return out + params
def values(self):
return [self[key] for key in self.keys()]
class ParamsTable(table.Table):
Row = ParamsRow
def test_params_table():
t = ParamsTable(names=['a', 'b', 'params'], dtype=['i', 'f', 'O'])
t.add_row((1, 2.0, {'x': 1.5, 'y': 2.5}))
t.add_row((2, 3.0, {'z': 'hello', 'id': 123123}))
assert t['params'][0] == {'x': 1.5, 'y': 2.5}
assert t[0]['params'] == {'x': 1.5, 'y': 2.5}
assert t[0]['y'] == 2.5
assert t[1]['id'] == 123123
assert list(t[1].keys()) == ['a', 'b', 'id', 'z']
assert list(t[1].values()) == [2, 3.0, 123123, 'hello']
|
aee33a85b110525749e09d487b63f2d9593000dcb6541834e8b6443dc2b19db9 | import numpy as np
import pickle
from astropy.table import Table, Column, MaskedColumn, QTable
from astropy.table.table_helpers import simple_table
from astropy.units import Quantity, deg
from astropy.time import Time
from astropy.coordinates import SkyCoord
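import pytest

# The tests below all take a ``protocol`` fixture argument that is not defined
# in this file. A minimal sketch is provided here, assuming the intent is to
# parametrize over several pickle protocol versions; the real fixture may
# instead live in a shared conftest.py. Note that the tests as written call
# pickle.dumps() without passing the protocol value explicitly.
@pytest.fixture(params=[0, 1, -1])
def protocol(request):
    """Pickle protocol version to exercise in each test."""
    return request.param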
def test_pickle_column(protocol):
c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
cs = pickle.dumps(c)
cp = pickle.loads(cs)
assert np.all(cp == c)
assert cp.attrs_equal(c)
assert cp._parent_table is None
assert repr(c) == repr(cp)
def test_pickle_masked_column(protocol):
c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm',
meta={'a': 1})
c.mask[1] = True
c.fill_value = -99
cs = pickle.dumps(c)
cp = pickle.loads(cs)
assert np.all(cp._data == c._data)
assert np.all(cp.mask == c.mask)
assert cp.attrs_equal(c)
assert cp.fill_value == -99
assert cp._parent_table is None
assert repr(c) == repr(cp)
def test_pickle_multidimensional_column(protocol):
"""Regression test for https://github.com/astropy/astropy/issues/4098"""
a = np.zeros((3, 2))
c = Column(a, name='a')
cs = pickle.dumps(c)
cp = pickle.loads(cs)
assert np.all(c == cp)
assert c.shape == cp.shape
assert cp.attrs_equal(c)
assert repr(c) == repr(cp)
def test_pickle_table(protocol):
a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
meta={'b': 1})
for table_class in Table, QTable:
t = table_class([a, b], meta={'a': 1, 'b': Quantity(10, unit='s')})
t['c'] = Quantity([1, 2], unit='m')
t['d'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
t['e'] = SkyCoord([125.0, 180.0] * deg, [-45.0, 36.5] * deg)
ts = pickle.dumps(t)
tp = pickle.loads(ts)
assert tp.__class__ is table_class
assert np.all(tp['a'] == t['a'])
assert np.all(tp['b'] == t['b'])
# test mixin columns
assert np.all(tp['c'] == t['c'])
assert np.all(tp['d'] == t['d'])
assert np.all(tp['e'].ra == t['e'].ra)
assert np.all(tp['e'].dec == t['e'].dec)
assert type(tp['c']) is type(t['c']) # nopep8
assert type(tp['d']) is type(t['d']) # nopep8
assert type(tp['e']) is type(t['e']) # nopep8
assert tp.meta == t.meta
assert type(tp) is type(t)
assert isinstance(tp['c'], Quantity if (table_class is QTable) else Column)
def test_pickle_masked_table(protocol):
a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
meta={'b': 1})
t = Table([a, b], meta={'a': 1}, masked=True)
t['a'].mask[1] = True
t['a'].fill_value = -99
ts = pickle.dumps(t)
tp = pickle.loads(ts)
for colname in ('a', 'b'):
for attr in ('_data', 'mask', 'fill_value'):
            assert np.all(getattr(tp[colname], attr) == getattr(t[colname], attr))
assert tp['a'].attrs_equal(t['a'])
assert tp['b'].attrs_equal(t['b'])
assert tp.meta == t.meta
def test_pickle_indexed_table(protocol):
"""
Ensure that any indices that have been added will survive pickling.
"""
t = simple_table()
t.add_index('a')
t.add_index(['a', 'b'])
ts = pickle.dumps(t)
tp = pickle.loads(ts)
assert len(t.indices) == len(tp.indices)
for index, indexp in zip(t.indices, tp.indices):
assert np.all(index.data.data == indexp.data.data)
assert index.data.data.colnames == indexp.data.data.colnames
|
196182825e9df1a9b5b8610afdf8b73d98eca8c93af063676d6d666c394fd0db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.table.table_helpers import ArrayWrapper
from astropy.coordinates.earth import EarthLocation
from astropy.units.quantity import Quantity
from collections import OrderedDict
from contextlib import nullcontext
import pytest
import numpy as np
from astropy.table import Table, QTable, TableMergeError, Column, MaskedColumn, NdarrayMixin
from astropy.table.operations import _get_out_class, join_skycoord, join_distance
from astropy import units as u
from astropy.utils import metadata
from astropy.utils.metadata import MergeConflictError
from astropy import table
from astropy.time import Time, TimeDelta
from astropy.coordinates import (SkyCoord, SphericalRepresentation,
UnitSphericalRepresentation,
CartesianRepresentation,
BaseRepresentationOrDifferential,
search_around_3d)
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def check_mask(col, exp_mask):
"""Check that col.mask == exp_mask"""
if hasattr(col, 'mask'):
# Coerce expected mask into dtype of col.mask. In particular this is
# needed for types like EarthLocation where the mask is a structured
# array.
exp_mask = np.array(exp_mask).astype(col.mask.dtype)
out = np.all(col.mask == exp_mask)
else:
# With no mask the check is OK if all the expected mask values
# are False (i.e. no auto-conversion to MaskedQuantity if it was
# not required by the join).
out = np.all(exp_mask == False)
return out
class TestJoin():
def _setup(self, t_cls=Table):
lines1 = [' a b c ',
' 0 foo L1',
' 1 foo L2',
' 1 bar L3',
' 2 bar L4']
lines2 = [' a b d ',
' 1 foo R1',
' 1 foo R2',
' 2 bar R3',
' 4 bar R4']
self.t1 = t_cls.read(lines1, format='ascii')
self.t2 = t_cls.read(lines2, format='ascii')
self.t3 = t_cls(self.t2, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]),
('c', {'a': 1, 'b': 1}),
('d', 1),
('a', 1)])
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.join(self.t1, self.t2, join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.join(self.t1, self.t3, join_type='inner')
assert len(w) == 3
assert out.meta == self.t3.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn')
assert len(w) == 3
assert out.meta == self.t3.meta
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent')
assert out.meta == self.t3.meta
with pytest.raises(MergeConflictError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense')
def test_both_unmasked_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Basic join with default parameters (inner join on common keys)
t12 = table.join(t1, t2)
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b']) is type(t1['b']) # noqa
assert type(t12['c']) is type(t1['c']) # noqa
assert type(t12['d']) is type(t2['d']) # noqa
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Table meta merged properly
assert t12.meta == self.meta_merge
def test_both_unmasked_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left')
assert t12.has_masked_columns is True
assert t12.masked is False
for name in ('a', 'b', 'c'):
assert type(t12[name]) is Column
assert type(t12['d']) is MaskedColumn
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Right join
t12 = table.join(t1, t2, join_type='right')
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer')
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Check that the common keys are 'a', 'b'
t12a = table.join(t1, t2, join_type='outer')
t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b'])
assert np.all(t12a.as_array() == t12b.as_array())
def test_both_unmasked_single_key_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Inner join on 'a' column
t12 = table.join(t1, t2, keys='a')
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b_1']) is type(t1['b']) # noqa
assert type(t12['c']) is type(t1['c']) # noqa
assert type(t12['b_2']) is type(t2['b']) # noqa
assert type(t12['d']) is type(t2['d']) # noqa
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
def test_both_unmasked_single_key_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
# Right join
t12 = table.join(t1, t2, join_type='right', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
def test_masked_unmasked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
# Result table is never masked
t1m2 = table.join(t1m, t2, join_type='inner')
assert t1m2.masked is False
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2))
# Mask out some values in left table and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t1m2 = table.join(t1m, t2, join_type='inner', keys='a')
assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar R3'])
t21m = table.join(t2, t1m, join_type='inner', keys='a')
assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ',
'--- --- --- --- ---',
' 1 foo R2 -- L2',
' 1 foo R2 bar --',
' 1 foo R1 -- L2',
' 1 foo R1 bar --',
' 2 bar R3 bar L4'])
def test_masked_masked(self, operation_table_type):
        """Two masked tables"""
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
t2m = operation_table_type(self.t2, masked=True)
# Result table is never masked but original column types are preserved
t1m2m = table.join(t1m, t2m, join_type='inner')
assert t1m2m.masked is False
for col in t1m2m.itercols():
assert type(col) is MaskedColumn
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2m))
# Mask out some values in both tables and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t2m['d'].mask[2] = True
t1m2m = table.join(t1m, t2m, join_type='inner', keys='a')
assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar --'])
def test_classes(self):
"""Ensure that classes and subclasses get through as expected"""
class MyCol(Column):
pass
class MyMaskedCol(MaskedColumn):
pass
t1 = Table()
t1['a'] = MyCol([1])
t1['b'] = MyCol([2])
t1['c'] = MyMaskedCol([3])
t2 = Table()
t2['a'] = Column([1, 2])
t2['d'] = MyCol([3, 4])
t2['e'] = MyMaskedCol([5, 6])
t12 = table.join(t1, t2, join_type='inner')
for name, exp_type in (('a', MyCol), ('b', MyCol), ('c', MyMaskedCol),
('d', MyCol), ('e', MyMaskedCol)):
            assert type(t12[name]) is exp_type
t21 = table.join(t2, t1, join_type='left')
# Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be
# masked, but col 'c' stays since MyMaskedCol supports masking.
for name, exp_type in (('a', MyCol), ('b', MaskedColumn), ('c', MyMaskedCol),
('d', MyCol), ('e', MyMaskedCol)):
            assert type(t21[name]) is exp_type
def test_col_rename(self, operation_table_type):
        """
        Test auto col renaming when there is a conflict. Use
        non-default values of uniq_col_name and table_names.
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y',
table_names=['L', 'R'], keys='a')
assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd']
def test_rename_conflict(self, operation_table_type):
        """
        Test that auto-column rename fails because of a conflict
        with an existing column
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename
with pytest.raises(TableMergeError):
table.join(t1, t2, keys='a')
def test_missing_keys(self, operation_table_type):
        """Merge on a key column that doesn't exist"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(TableMergeError):
table.join(t1, t2, keys=['a', 'not there'])
def test_bad_join_type(self, operation_table_type):
        """Bad join_type input"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(ValueError):
table.join(t1, t2, join_type='illegal value')
def test_no_common_keys(self, operation_table_type):
        """Merge tables with no common keys"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
del t1['a']
del t1['b']
del t2['a']
del t2['b']
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_masked_key_column(self, operation_table_type):
        """Merge on a key column that has a masked element"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t2 = operation_table_type(self.t2, masked=True)
table.join(t1, t2) # OK
t2['a'].mask[0] = True
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t2.rename_column('d', 'c') # force col conflict and renaming
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
        # Key col 'a', unit conflict ('cm' vs 'm'): merge takes the last value with a warning
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
        # Key col 'b', take first non-empty value '%6s'
t2['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta = meta1
t2['a'].info.meta = meta2
# Key col 'b', should be meta2
t2['b'].info.meta = meta2
# All these should pass through
t1['c'].info.format = '%3s'
t1['c'].info.description = 't1_c'
t2['c'].info.format = '%6s'
t2['c'].info.description = 't2_c'
if operation_table_type is Table:
ctx = pytest.warns(metadata.MergeConflictWarning, match=r"In merged column 'a' the 'unit' attribute does not match \(cm != m\)") # noqa
else:
ctx = nullcontext()
with ctx:
t12 = table.join(t1, t2, keys=['a', 'b'])
assert t12['a'].unit == 'm'
assert t12['b'].info.description == 't1_b'
assert t12['b'].info.format == '%6s'
assert t12['a'].info.meta == self.meta_merge
assert t12['b'].info.meta == meta2
assert t12['c_1'].info.format == '%3s'
assert t12['c_1'].info.description == 't1_c'
assert t12['c_2'].info.format == '%6s'
assert t12['c_2'].info.description == 't2_c'
def test_join_multidimensional(self, operation_table_type):
self._setup(operation_table_type)
# Regression test for #2984, which was an issue where join did not work
# on multi-dimensional columns.
t1 = operation_table_type()
t1['a'] = [1, 2, 3]
t1['b'] = np.ones((3, 4))
t2 = operation_table_type()
t2['a'] = [1, 2, 3]
t2['c'] = [4, 5, 6]
t3 = table.join(t1, t2)
np.testing.assert_allclose(t3['a'], t1['a'])
np.testing.assert_allclose(t3['b'], t1['b'])
np.testing.assert_allclose(t3['c'], t2['c'])
def test_join_multidimensional_masked(self, operation_table_type):
self._setup(operation_table_type)
"""
Test for outer join with multidimensional columns where masking is required.
(Issue #4059).
"""
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
a = table.MaskedColumn([1, 2, 3], name='a')
a2 = table.Column([1, 3, 4], name='a')
b = table.MaskedColumn([[1, 2],
[3, 4],
[5, 6]],
name='b',
mask=[[1, 0],
[0, 1],
[0, 0]])
c = table.Column([[1, 1],
[2, 2],
[3, 3]],
name='c')
t1 = operation_table_type([a, b])
t2 = operation_table_type([a2, c])
t12 = table.join(t1, t2, join_type='inner')
assert np.all(t12['b'].mask == [[True, False],
[False, False]])
assert not hasattr(t12['c'], 'mask')
t12 = table.join(t1, t2, join_type='outer')
assert np.all(t12['b'].mask == [[True, False],
[False, True],
[False, False],
[True, True]])
assert np.all(t12['c'].mask == [[False, False],
[True, True],
[False, False],
[False, False]])
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
cls_name = type(col).__name__
len_col = len(col)
idx = np.arange(len_col)
t1 = table.QTable([idx, col], names=['idx', 'm1'])
t2 = table.QTable([idx, col], names=['idx', 'm2'])
# Set up join mismatches for different join_type cases
t1 = t1[[0, 1, 3]]
t2 = t2[[0, 2, 3]]
# Test inner join, which works for all mixin_cols
out = table.join(t1, t2, join_type='inner')
assert len(out) == 2
assert out['m2'].__class__ is col.__class__
assert np.all(out['idx'] == [0, 3])
if cls_name == 'SkyCoord':
# SkyCoord doesn't support __eq__ so use our own
assert skycoord_equal(out['m1'], col[[0, 3]])
assert skycoord_equal(out['m2'], col[[0, 3]])
elif 'Repr' in cls_name or 'Diff' in cls_name:
assert np.all(representation_equal(out['m1'], col[[0, 3]]))
assert np.all(representation_equal(out['m2'], col[[0, 3]]))
else:
assert np.all(out['m1'] == col[[0, 3]])
assert np.all(out['m2'] == col[[0, 3]])
# Check for left, right, outer join which requires masking. Works for
# the listed mixins classes.
if isinstance(col, (Quantity, Time, TimeDelta)):
out = table.join(t1, t2, join_type='left')
assert len(out) == 3
assert np.all(out['idx'] == [0, 1, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
            assert check_mask(out['m1'], [False, False, False])
            assert check_mask(out['m2'], [False, True, False])
out = table.join(t1, t2, join_type='right')
assert len(out) == 3
assert np.all(out['idx'] == [0, 2, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
            assert check_mask(out['m1'], [False, True, False])
            assert check_mask(out['m2'], [False, False, False])
out = table.join(t1, t2, join_type='outer')
assert len(out) == 4
assert np.all(out['idx'] == [0, 1, 2, 3])
assert np.all(out['m1'] == col)
assert np.all(out['m2'] == col)
assert check_mask(out['m1'], [False, False, True, False])
assert check_mask(out['m2'], [False, True, False, False])
else:
# Otherwise make sure it fails with the right exception message
for join_type in ('outer', 'left', 'right'):
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type=join_type)
assert ('join requires masking' in str(err.value)
or 'join unavailable' in str(err.value))
def test_cartesian_join(self, operation_table_type):
t1 = Table(rows=[(1, 'a'),
(2, 'b')], names=['a', 'b'])
t2 = Table(rows=[(3, 'c'),
(4, 'd')], names=['a', 'c'])
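        # A cartesian join pairs every row of t1 with every row of t2, so the
        # result has len(t1) * len(t2) rows; key columns are not allowed.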
t12 = table.join(t1, t2, join_type='cartesian')
assert t1.colnames == ['a', 'b']
assert t2.colnames == ['a', 'c']
assert len(t12) == len(t1) * len(t2)
assert str(t12).splitlines() == [
'a_1 b a_2 c ',
'--- --- --- ---',
' 1 a 3 c',
' 1 a 4 d',
' 2 b 3 c',
' 2 b 4 d']
with pytest.raises(ValueError, match='cannot supply keys for a cartesian join'):
t12 = table.join(t1, t2, join_type='cartesian', keys='a')
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_skycoord_sky(self):
sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg')
sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg')
t1 = Table([sc1], names=['sc'])
t2 = Table([sc2], names=['sc'])
t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)})
exp = ['sc_id sc_1 sc_2 ',
' deg,deg deg,deg ',
'----- ------- --------',
' 1 1.0,0.0 1.05,0.0',
' 1 1.1,0.0 1.05,0.0',
' 2 2.0,0.0 2.1,0.0']
assert str(t12).splitlines() == exp
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('distance_func', ['search_around_3d', search_around_3d])
def test_join_with_join_skycoord_3d(self, distance_func):
sc1 = SkyCoord([0, 1, 1.1, 2]*u.deg, [0, 0, 0, 0]*u.deg, [1, 1, 2, 1]*u.m)
sc2 = SkyCoord([0.5, 1.05, 2.1]*u.deg, [0, 0, 0]*u.deg, [1, 1, 1]*u.m)
t1 = Table([sc1], names=['sc'])
t2 = Table([sc2], names=['sc'])
join_func = join_skycoord(np.deg2rad(0.2) * u.m,
distance_func=distance_func)
t12 = table.join(t1, t2, join_funcs={'sc': join_func})
exp = ['sc_id sc_1 sc_2 ',
' deg,deg,m deg,deg,m ',
'----- ----------- ------------',
' 1 1.0,0.0,1.0 1.05,0.0,1.0',
' 2 2.0,0.0,1.0 2.1,0.0,1.0']
assert str(t12).splitlines() == exp
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_distance_1d(self):
c1 = [0, 1, 1.1, 2]
c2 = [0.5, 1.05, 2.1]
t1 = Table([c1], names=['col'])
t2 = Table([c2], names=['col'])
join_func = join_distance(0.2,
kdtree_args={'leafsize': 32},
query_args={'p': 2})
t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func})
exp = ['col_id col_1 col_2',
'------ ----- -----',
' 1 1.0 1.05',
' 1 1.1 1.05',
' 2 2.0 2.1',
' 3 0.0 --',
' 4 -- 0.5']
assert str(t12).splitlines() == exp
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_distance_1d_multikey(self):
from astropy.table.operations import _apply_join_funcs
c1 = [0, 1, 1.1, 1.2, 2]
id1 = [0, 1, 2, 2, 3]
o1 = ['a', 'b', 'c', 'd', 'e']
c2 = [0.5, 1.05, 2.1]
id2 = [0, 2, 4]
o2 = ['z', 'y', 'x']
t1 = Table([c1, id1, o1], names=['col', 'id', 'o1'])
t2 = Table([c2, id2, o2], names=['col', 'id', 'o2'])
join_func = join_distance(0.2)
join_funcs = {'col': join_func}
t12 = table.join(t1, t2, join_type='outer', join_funcs=join_funcs)
exp = ['col_id col_1 id o1 col_2 o2',
'------ ----- --- --- ----- ---',
' 1 1.0 1 b -- --',
' 1 1.1 2 c 1.05 y',
' 1 1.2 2 d 1.05 y',
' 2 2.0 3 e -- --',
' 2 -- 4 -- 2.1 x',
' 3 0.0 0 a -- --',
' 4 -- 0 -- 0.5 z']
assert str(t12).splitlines() == exp
left, right, keys = _apply_join_funcs(t1, t2, ('col', 'id'), join_funcs)
assert keys == ('col_id', 'id')
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_distance_1d_quantity(self):
c1 = [0, 1, 1.1, 2] * u.m
c2 = [500, 1050, 2100] * u.mm
t1 = QTable([c1], names=['col'])
t2 = QTable([c2], names=['col'])
join_func = join_distance(20 * u.cm)
t12 = table.join(t1, t2, join_funcs={'col': join_func})
exp = ['col_id col_1 col_2 ',
' m mm ',
'------ ----- ------',
' 1 1.0 1050.0',
' 1 1.1 1050.0',
' 2 2.0 2100.0']
assert str(t12).splitlines() == exp
# Generate column name conflict
t2['col_id'] = [0, 0, 0]
t2['col__id'] = [0, 0, 0]
t12 = table.join(t1, t2, join_funcs={'col': join_func})
exp = ['col___id col_1 col_2 col_id col__id',
' m mm ',
'-------- ----- ------ ------ -------',
' 1 1.0 1050.0 0 0',
' 1 1.1 1050.0 0 0',
' 2 2.0 2100.0 0 0']
assert str(t12).splitlines() == exp
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_distance_2d(self):
c1 = np.array([[0, 1, 1.1, 2],
[0, 0, 1, 0]]).transpose()
c2 = np.array([[0.5, 1.05, 2.1],
[0, 0, 0]]).transpose()
t1 = Table([c1], names=['col'])
t2 = Table([c2], names=['col'])
join_func = join_distance(0.2,
kdtree_args={'leafsize': 32},
query_args={'p': 2})
t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func})
exp = ['col_id col_1 col_2 ',
f'{t12["col_id"].dtype.name} float64[2] float64[2]', # int32 or int64
'------ ---------- -----------',
' 1 1.0 .. 0.0 1.05 .. 0.0',
' 2 2.0 .. 0.0 2.1 .. 0.0',
' 3 0.0 .. 0.0 -- .. --',
' 4 1.1 .. 1.0 -- .. --',
' 5 -- .. -- 0.5 .. 0.0']
assert t12.pformat(show_dtype=True) == exp
def test_keys_left_right_basic(self):
"""Test using the keys_left and keys_right args to specify different
join keys. This takes the standard test case but renames column 'a'
to 'x' and 'y' respectively for tables 1 and 2. Then it compares the
normal join on 'a' to the new join on 'x' and 'y'."""
self._setup()
for join_type in ('inner', 'left', 'right', 'outer'):
t1 = self.t1.copy()
t2 = self.t2.copy()
# Expected is same as joining on 'a' but with names 'x', 'y' instead
t12_exp = table.join(t1, t2, keys='a', join_type=join_type)
t12_exp.add_column(t12_exp['a'], name='x', index=1)
t12_exp.add_column(t12_exp['a'], name='y', index=len(t1.colnames) + 1)
del t12_exp['a']
# Different key names
t1.rename_column('a', 'x')
t2.rename_column('a', 'y')
keys_left_list = ['x'] # Test string key name
keys_right_list = [['y']] # Test list of string key names
if join_type == 'outer':
# Just do this for the outer join (others are the same)
keys_left_list.append([t1['x'].tolist()]) # Test list key column
keys_right_list.append([t2['y']]) # Test Column key column
for keys_left, keys_right in zip(keys_left_list, keys_right_list):
t12 = table.join(t1, t2, keys_left=keys_left, keys_right=keys_right,
join_type=join_type)
assert t12.colnames == t12_exp.colnames
for col in t12.values_equal(t12_exp).itercols():
assert np.all(col)
assert t12_exp.meta == t12.meta
def test_keys_left_right_exceptions(self):
"""Test exceptions using the keys_left and keys_right args to specify
different join keys.
"""
self._setup()
t1 = self.t1
t2 = self.t2
msg = r"left table does not have key column 'z'"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left='z', keys_right=['a'])
msg = r"left table has different length from key \[1, 2\]"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=[[1, 2]], keys_right=['a'])
msg = r"keys arg must be None if keys_left and keys_right are supplied"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left='z', keys_right=['a'], keys='a')
msg = r"keys_left and keys_right args must have same length"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=['a', 'b'], keys_right=['a'])
msg = r"keys_left and keys_right must both be provided"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=['a', 'b'])
msg = r"cannot supply join_funcs arg and keys_left / keys_right"
with pytest.raises(ValueError, match=msg):
table.join(t1, t2, keys_left=['a'], keys_right=['a'], join_funcs={})
def test_join_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table([np.array([(1., 1), (2., 2)], dtype=[('f', 'f8'), ('i', 'i8')]),
['one', 'two']], names=['structured', 'string'])
t2 = Table([np.array([(2., 2), (4., 4)], dtype=[('f', 'f8'), ('i', 'i8')]),
['three', 'four']], names=['structured', 'string'])
t12 = table.join(t1, t2, ['structured'], join_type='outer')
assert t12.pformat() == [
'structured [f, i] string_1 string_2',
'----------------- -------- --------',
' (1., 1) one --',
' (2., 2) two three',
' (4., 4) -- four']
class TestSetdiff():
def _setup(self, t_cls=Table):
lines1 = [' a b ',
' 0 foo ',
' 1 foo ',
' 1 bar ',
' 2 bar ']
lines2 = [' a b ',
' 0 foo ',
' 3 foo ',
' 4 bar ',
' 2 bar ']
lines3 = [' a b d ',
' 0 foo R1',
' 8 foo R2',
' 1 bar R3',
' 4 bar R4']
self.t1 = t_cls.read(lines1, format='ascii')
self.t2 = t_cls.read(lines2, format='ascii')
self.t3 = t_cls.read(lines3, format='ascii')
def test_default_same_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t2)
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b ',
'--- ---',
' 1 bar',
' 1 foo']
def test_default_same_tables(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t1)
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b ',
'--- ---']
def test_extra_col_left_table(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.setdiff(self.t3, self.t1)
def test_extra_col_right_table(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t3)
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b ',
'--- ---',
' 1 foo',
' 2 bar']
def test_keys(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t3, self.t1, keys=['a', 'b'])
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b d ',
'--- --- ---',
' 4 bar R4',
' 8 foo R2']
def test_missing_key(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.setdiff(self.t3, self.t1, keys=['a', 'd'])
class TestVStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t3 = t_cls.read([' a b',
' 4. 7',
' 5. 8',
' 6. 9'], format='ascii')
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
('c', {'a': 1, 'b': 1, 'c': 1}),
('d', 1),
('a', 1),
('e', 1)])
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match='Did you accidentally call vstack'):
table.vstack(self.t1, self.t2)
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2[1]])
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'1.0 bar']
def test_stack_table_column(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2['a']])
assert out.masked is False
assert out.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'0.0 --',
'1.0 --']
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.vstack([self.t1, self.t2, self.t4], join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.vstack([self.t1, self.t5], join_type='inner')
assert len(w) == 2
assert out.meta == self.t5.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
assert len(w) == 2
assert out.meta == self.t5.meta
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.vstack([])
with pytest.raises(TypeError):
table.vstack(1)
with pytest.raises(TypeError):
table.vstack([self.t2, 1])
with pytest.raises(ValueError):
table.vstack([self.t1, self.t2], join_type='invalid join type')
def test_stack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type='inner')
assert t12.masked is False
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b']) is type(t1['b']) # noqa
assert t12.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'2.0 pez',
'3.0 sez']
t124 = table.vstack([t1, t2, t4], join_type='inner')
assert type(t124) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b']) is type(t1['b']) # noqa
assert t124.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'2.0 pez',
'3.0 sez',
'0.0 foo',
'1.0 bar']
def test_stack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type='outer')
assert t12.masked is False
assert t12.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo --',
'1.0 bar --',
'2.0 pez 4',
'3.0 sez 5']
t124 = table.vstack([t1, t2, t4], join_type='outer')
assert t124.masked is False
assert t124.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo --',
'1.0 bar --',
'2.0 pez 4',
'3.0 sez 5',
'0.0 foo --',
'1.0 bar --']
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type='inner')
assert ("The 'b' columns have incompatible types: {}"
.format([self.t1['b'].dtype.name, self.t3['b'].dtype.name])
in str(excinfo.value))
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type='outer')
assert "The 'b' columns have incompatible types:" in str(excinfo.value)
with pytest.raises(TableMergeError):
table.vstack([self.t1, self.t2], join_type='exact')
t1_reshape = self.t1.copy()
t1_reshape['b'].shape = [2, 1]
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, t1_reshape])
assert "have different shape" in str(excinfo.value)
def test_vstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t4 = self.t4
t4['b'].mask[1] = True
t14 = table.vstack([t1, t4])
assert t14.masked is False
assert t14.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'0.0 foo',
'1.0 --']
def test_col_meta_merge_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1['a'].info.unit = 'cm'
t2['a'].info.unit = 'm'
t4['a'].info.unit = 'km'
# Key col 'a' format should take last when all match
t1['a'].info.format = '%f'
t2['a'].info.format = '%f'
t4['a'].info.format = '%f'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t4['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
# Key col 'b', should be meta2
t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
if operation_table_type is Table:
ctx = pytest.warns(metadata.MergeConflictWarning)
else:
ctx = nullcontext()
with ctx as warning_lines:
out = table.vstack([t1, t2, t4], join_type='inner')
if operation_table_type is Table:
assert len(warning_lines) == 2
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message))
# Check units are suitably ignored for a regular Table
assert out.pformat() == [' a b ',
' km ',
'-------- ------',
'0.000000 foo',
'1.000000 bar',
'2.000000 pez',
'3.000000 sez',
'0.000000 foo',
'1.000000 bar']
else:
# Check QTable correctly dealt with units.
assert out.pformat() == [' a b ',
' km ',
'-------- ------',
'0.000000 foo',
'0.000010 bar',
'0.002000 pez',
'0.003000 sez',
'0.000000 foo',
'1.000000 bar']
assert out['a'].info.unit == 'km'
assert out['a'].info.format == '%f'
assert out['b'].info.description == 't1_b'
assert out['b'].info.format == '%6s'
assert out['a'].info.meta == self.meta_merge
assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
def test_col_meta_merge_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
t4['a'].unit = 'km'
# Key col 'a' format should take last when all match
t1['a'].info.format = '%0d'
t2['a'].info.format = '%0d'
t4['a'].info.format = '%0d'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t4['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
# Key col 'b', should be meta2
t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
# All these should pass through
t2['c'].unit = 'm'
t2['c'].info.format = '%6s'
t2['c'].info.description = 't2_c'
with pytest.warns(metadata.MergeConflictWarning) as warning_lines:
out = table.vstack([t1, t2, t4], join_type='outer')
assert len(warning_lines) == 2
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message))
assert out['a'].unit == 'km'
assert out['a'].info.format == '%0d'
assert out['b'].info.description == 't1_b'
assert out['b'].info.format == '%6s'
assert out['a'].info.meta == self.meta_merge
assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
assert out['c'].info.unit == 'm'
assert out['c'].info.format == '%6s'
assert out['c'].info.description == 't2_c'
def test_vstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
assert (self.t1 == table.vstack(self.t1)).all()
assert (self.t1 == table.vstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
len_col = len(col)
t = table.QTable([col], names=['a'])
cls_name = type(col).__name__
# Vstack works for these classes:
if isinstance(col, (u.Quantity, Time, TimeDelta, SkyCoord, EarthLocation,
BaseRepresentationOrDifferential)):
out = table.vstack([t, t])
assert len(out) == len_col * 2
if cls_name == 'SkyCoord':
# Argh, SkyCoord needs __eq__!!
assert skycoord_equal(out['a'][len_col:], col)
assert skycoord_equal(out['a'][:len_col], col)
elif 'Repr' in cls_name or 'Diff' in cls_name:
assert np.all(representation_equal(out['a'][:len_col], col))
assert np.all(representation_equal(out['a'][len_col:], col))
else:
assert np.all(out['a'][:len_col] == col)
assert np.all(out['a'][len_col:] == col)
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t])
assert ('vstack unavailable for mixin column type(s): {}'
.format(cls_name) in str(err.value))
        # Check for outer stack which requires masking. Only Time, TimeDelta
        # and Quantity support this currently.
t2 = table.QTable([col], names=['b']) # different from col name for t
if isinstance(col, (Time, TimeDelta, Quantity)):
out = table.vstack([t, t2], join_type='outer')
assert len(out) == len_col * 2
assert np.all(out['a'][:len_col] == col)
assert np.all(out['b'][len_col:] == col)
assert check_mask(out['a'], [False] * len_col + [True] * len_col)
assert check_mask(out['b'], [True] * len_col + [False] * len_col)
# check directly stacking mixin columns:
out2 = table.vstack([t, t2['b']])
assert np.all(out['a'] == out2['a'])
assert np.all(out['b'] == out2['b'])
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t2], join_type='outer')
assert ('vstack requires masking' in str(err.value)
or 'vstack unavailable' in str(err.value))
def test_vstack_different_representation(self):
"""Test that representations can be mixed together."""
rep1 = CartesianRepresentation([1, 2]*u.km, [3, 4]*u.km, 1*u.km)
rep2 = SphericalRepresentation([0]*u.deg, [0]*u.deg, 10*u.km)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.vstack([t1, t2])
expected = CartesianRepresentation([1, 2, 10]*u.km,
[3, 4, 0]*u.km,
[1, 1, 0]*u.km)
assert np.all(representation_equal(t12['col0'], expected))
rep3 = UnitSphericalRepresentation([0]*u.deg, [0]*u.deg)
t3 = Table([rep3])
with pytest.raises(ValueError, match='representations are inconsistent'):
table.vstack([t1, t3])
def test_vstack_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table([np.array([(1., 1), (2., 2)], dtype=[('f', 'f8'), ('i', 'i8')]),
['one', 'two']], names=['structured', 'string'])
t2 = Table([np.array([(3., 3), (4., 4)], dtype=[('f', 'f8'), ('i', 'i8')]),
['three', 'four']], names=['structured', 'string'])
t12 = table.vstack([t1, t2])
assert t12.pformat() == [
'structured [f, i] string',
'----------------- ------',
' (1., 1) one',
' (2., 2) two',
' (3., 3) three',
' (4., 4) four']
# One table without the structured column.
t3 = t2[('string',)]
t13 = table.vstack([t1, t3])
assert t13.pformat() == [
'structured [f, i] string',
'----------------- ------',
' (1.0, 1) one',
' (2.0, 2) two',
' -- three',
' -- four']
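# Illustrative sketch (an assumption, not part of the original suite): the
# row-stacking of matching structured columns exercised above mirrors plain
# numpy concatenation of identical structured dtypes, as this minimal
# analogy shows.
def _sketch_structured_concat():
    dt = np.dtype([('f', 'f8'), ('i', 'i8')])
    a = np.array([(1., 1), (2., 2)], dtype=dt)
    b = np.array([(3., 3), (4., 4)], dtype=dt)
    ab = np.concatenate([a, b])
    assert ab.shape == (4,)
    assert ab.dtype == dt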
class TestDStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t2['d'] = Time([1, 2], format='cxcsec')
self.t3 = t_cls({'a': [[5., 6.], [4., 3.]],
'b': [['foo', 'bar'], ['pez', 'sez']]},
names=('a', 'b'))
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
self.t5 = t_cls({'a': [[4., 2.], [1., 6.]],
'b': [['foo', 'pez'], ['bar', 'sez']]},
names=('a', 'b'))
self.t6 = t_cls.read([' a b c',
' 7. pez 2',
' 4. sez 6',
' 6. foo 3'], format='ascii')
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match='Did you accidentally call dstack'):
table.dstack(self.t1, self.t2)
@staticmethod
def compare_dstack(tables, out):
for ii, tbl in enumerate(tables):
for name, out_col in out.columns.items():
if name in tbl.colnames:
# Columns always compare equal
assert np.all(tbl[name] == out[name][:, ii])
# If input has a mask then output must have same mask
if hasattr(tbl[name], 'mask'):
assert np.all(tbl[name].mask == out[name].mask[:, ii])
# If input has no mask then output might have a mask (if other table
# is missing that column). If so then all mask values should be False.
elif hasattr(out[name], 'mask'):
assert not np.any(out[name].mask[:, ii])
else:
# Column missing for this table, out must have a mask with all True.
assert np.all(out[name].mask[:, ii])
def test_dstack_table_column(self, operation_table_type):
"""Stack a table with 3 cols and one column (gets auto-converted to Table).
"""
self._setup(operation_table_type)
t2 = self.t1.copy()
out = table.dstack([self.t1, t2['a']])
self.compare_dstack([self.t1, t2[('a',)]], out)
def test_dstack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t4['a'].mask[0] = True
# Test for non-masked table
t12 = table.dstack([t1, t2], join_type='outer')
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b']) is type(t1['b']) # noqa
self.compare_dstack([t1, t2], t12)
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type='outer')
assert type(t124) is operation_table_type
assert type(t124['a']) is type(t4['a']) # noqa
assert type(t124['b']) is type(t4['b']) # noqa
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type='inner')
assert type(t124) is operation_table_type
assert type(t124['a']) is type(t4['a']) # noqa
assert type(t124['b']) is type(t4['b']) # noqa
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_multi_dimension_column(self, operation_table_type):
self._setup(operation_table_type)
t3 = self.t3
t5 = self.t5
t2 = self.t2
t35 = table.dstack([t3, t5])
assert type(t35) is operation_table_type
assert type(t35['a']) is type(t3['a']) # noqa
assert type(t35['b']) is type(t3['b']) # noqa
self.compare_dstack([t3, t5], t35)
with pytest.raises(TableMergeError):
table.dstack([t2, t3])
def test_dstack_different_length_table(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t2
t6 = self.t6
with pytest.raises(ValueError):
table.dstack([t2, t6])
def test_dstack_single_table(self):
self._setup(Table)
out = table.dstack(self.t1)
assert np.all(out == self.t1)
def test_dstack_representation(self):
rep1 = SphericalRepresentation([1, 2]*u.deg, [3, 4]*u.deg, 1*u.kpc)
rep2 = SphericalRepresentation([10, 20]*u.deg, [30, 40]*u.deg, 10*u.kpc)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.dstack([t1, t2])
assert np.all(representation_equal(t12['col0'][:, 0], rep1))
assert np.all(representation_equal(t12['col0'][:, 1], rep2))
def test_dstack_skycoord(self):
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
t1 = Table([sc1])
t2 = Table([sc2])
t12 = table.dstack([t1, t2])
assert skycoord_equal(sc1, t12['col0'][:, 0])
assert skycoord_equal(sc2, t12['col0'][:, 1])
def test_dstack_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table([np.array([(1., 1), (2., 2)], dtype=[('f', 'f8'), ('i', 'i8')]),
['one', 'two']], names=['structured', 'string'])
t2 = Table([np.array([(3., 3), (4., 4)], dtype=[('f', 'f8'), ('i', 'i8')]),
['three', 'four']], names=['structured', 'string'])
t12 = table.dstack([t1, t2])
assert t12.pformat() == [
'structured [f, i] string ',
'------------------ ------------',
'(1., 1) .. (3., 3) one .. three',
'(2., 2) .. (4., 4) two .. four']
# One table without the structured column.
t3 = t2[('string',)]
t13 = table.dstack([t1, t3])
assert t13.pformat() == [
'structured [f, i] string ',
'----------------- ------------',
' (1.0, 1) .. -- one .. three',
' (2.0, 2) .. -- two .. four']
class TestHStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t3 = t_cls.read([' d e',
' 4. 7',
' 5. 8',
' 6. 9'], format='ascii')
self.t4 = t_cls(self.t1, copy=True, masked=True)
self.t4['a'].name = 'f'
self.t4['b'].name = 'g'
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
('c', {'a': 1, 'b': 1, 'c': 1}),
('d', 1),
('a', 1),
('e', 1)])
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match='Did you accidentally call hstack'):
table.hstack(self.t1, self.t2)
def test_stack_same_table(self, operation_table_type):
"""
From #2995, test that hstack'ing references to the same table has the
expected output.
"""
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t1])
assert out.masked is False
assert out.pformat() == ['a_1 b_1 a_2 b_2',
'--- --- --- ---',
'0.0 foo 0.0 foo',
'1.0 bar 1.0 bar']
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1[0], self.t2[1]])
assert out.masked is False
assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
'--- --- --- --- ---',
'0.0 foo 3.0 sez 5']
def test_stack_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2['c']])
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert type(out['c']) is type(self.t2['c']) # noqa
assert out.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo 4',
'1.0 bar 5']
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2, self.t4], join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.hstack([self.t1, self.t5], join_type='inner')
assert len(w) == 2
assert out.meta == self.t5.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
assert len(w) == 2
assert out.meta == self.t5.meta
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.hstack([])
with pytest.raises(TypeError):
table.hstack(1)
with pytest.raises(TypeError):
table.hstack([self.t2, 1])
with pytest.raises(ValueError):
table.hstack([self.t1, self.t2], join_type='invalid join type')
def test_stack_basic(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t3 = self.t3
t4 = self.t4
out = table.hstack([t1, t2], join_type='inner')
assert out.masked is False
assert type(out) is operation_table_type
assert type(out['a_1']) is type(t1['a']) # noqa
assert type(out['b_1']) is type(t1['b']) # noqa
assert type(out['a_2']) is type(t2['a']) # noqa
assert type(out['b_2']) is type(t2['b']) # noqa
assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
'--- --- --- --- ---',
'0.0 foo 2.0 pez 4',
'1.0 bar 3.0 sez 5']
# stacking as a list gives same result
out_list = table.hstack([t1, t2], join_type='inner')
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2], join_type='outer')
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2, t3, t4], join_type='outer')
assert out.masked is False
assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
'--- --- --- --- --- --- --- --- ---',
'0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
'1.0 bar 3.0 sez 5 5.0 8 1.0 bar',
' -- -- -- -- -- 6.0 9 -- --']
out = table.hstack([t1, t2, t3, t4], join_type='inner')
assert out.masked is False
assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
'--- --- --- --- --- --- --- --- ---',
'0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
'1.0 bar 3.0 sez 5 5.0 8 1.0 bar']
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
        # join_type 'exact' will fail here because the number of rows
        # does not match
with pytest.raises(TableMergeError):
table.hstack([self.t1, self.t3], join_type='exact')
def test_hstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = operation_table_type(t1, copy=True, masked=True)
t2.meta.clear()
t2['b'].mask[1] = True
out = table.hstack([t1, t2])
assert out.pformat() == ['a_1 b_1 a_2 b_2',
'--- --- --- ---',
'0.0 foo 0.0 foo',
'1.0 bar 1.0 --']
def test_table_col_rename(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2], join_type='inner',
uniq_col_name='{table_name}_{col_name}',
table_names=('left', 'right'))
assert out.masked is False
assert out.pformat() == ['left_a left_b right_a right_b c ',
'------ ------ ------- ------- ---',
' 0.0 foo 2.0 pez 4',
' 1.0 bar 3.0 sez 5']
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t3 = self.t3[:2]
t4 = self.t4
# Just set a bunch of meta and make sure it is the same in output
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
t1['a'].unit = 'cm'
t1['b'].info.description = 't1_b'
t4['f'].info.format = '%6s'
t1['b'].info.meta.update(meta1)
t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t3['d'].unit = 'm'
t3['d'].info.format = '%6s'
t3['d'].info.description = 't3_c'
out = table.hstack([t1, t3, t4], join_type='exact')
for t in [t1, t3, t4]:
for name in t.colnames:
for attr in ('meta', 'unit', 'format', 'description'):
assert getattr(out[name].info, attr) == getattr(t[name].info, attr)
# Make sure we got a copy of meta, not ref
t1['b'].info.meta['b'] = None
assert out['b'].info.meta['b'] == [1, 2]
def test_hstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
assert (self.t1 == table.hstack(self.t1)).all()
assert (self.t1 == table.hstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col1 = mixin_cols['m']
col2 = col1[2:4] # Shorter version of col1
t1 = table.QTable([col1])
t2 = table.QTable([col2])
cls_name = type(col1).__name__
out = table.hstack([t1, t2], join_type='inner')
assert type(out['col0_1']) is type(out['col0_2']) # noqa
assert len(out) == len(col2)
# Check that columns are as expected.
if cls_name == 'SkyCoord':
assert skycoord_equal(out['col0_1'], col1[:len(col2)])
assert skycoord_equal(out['col0_2'], col2)
elif 'Repr' in cls_name or 'Diff' in cls_name:
assert np.all(representation_equal(out['col0_1'], col1[:len(col2)]))
assert np.all(representation_equal(out['col0_2'], col2))
else:
assert np.all(out['col0_1'] == col1[:len(col2)])
assert np.all(out['col0_2'] == col2)
        # Time, TimeDelta and Quantity support masking; other mixins do not
if isinstance(col1, (Time, TimeDelta, Quantity)):
out = table.hstack([t1, t2], join_type='outer')
assert len(out) == len(t1)
assert np.all(out['col0_1'] == col1)
assert np.all(out['col0_2'][:len(col2)] == col2)
assert check_mask(out['col0_2'], [False, False, True, True])
# check directly stacking mixin columns:
out2 = table.hstack([t1, t2['col0']], join_type='outer')
assert np.all(out['col0_1'] == out2['col0_1'])
assert np.all(out['col0_2'] == out2['col0_2'])
else:
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type='outer')
assert 'hstack requires masking' in str(err.value)
def test_unique(operation_table_type):
t = operation_table_type.read(
[' a b c d',
' 2 b 7.0 0',
' 1 c 3.0 5',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 1 a 1.0 7',
' 2 b 5.0 1',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 c 3.0 5',
], format='ascii')
tu = operation_table_type(np.sort(t[:-1]))
t_all = table.unique(t)
assert sort_eq(t_all.pformat(), tu.pformat())
t_s = t.copy()
del t_s['b', 'c', 'd']
t_all = table.unique(t_s)
assert sort_eq(t_all.pformat(), [' a ',
'---',
' 0',
' 1',
' 2'])
key1 = 'a'
t1a = table.unique(t, key1)
assert sort_eq(t1a.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 c 3.0 5',
' 2 b 7.0 0'])
t1b = table.unique(t, key1, keep='last')
assert sort_eq(t1b.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 c 3.0 5',
' 2 b 5.0 1'])
t1c = table.unique(t, key1, keep='none')
assert sort_eq(t1c.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4'])
key2 = ['a', 'b']
t2a = table.unique(t, key2)
assert sort_eq(t2a.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 1.0 7',
' 1 c 3.0 5',
' 2 a 4.0 3',
' 2 b 7.0 0'])
t2b = table.unique(t, key2, keep='last')
assert sort_eq(t2b.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 c 3.0 5',
' 2 a 4.0 3',
' 2 b 5.0 1'])
t2c = table.unique(t, key2, keep='none')
assert sort_eq(t2c.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 2 a 4.0 3'])
key2 = ['a', 'a']
with pytest.raises(ValueError) as exc:
t2a = table.unique(t, key2)
assert exc.value.args[0] == "duplicate key names"
with pytest.raises(ValueError) as exc:
table.unique(t, key2, keep=True)
assert exc.value.args[0] == (
"'keep' should be one of 'first', 'last', 'none'")
t1_m = operation_table_type(t1a, masked=True)
t1_m['a'].mask[1] = True
with pytest.raises(ValueError) as exc:
t1_mu = table.unique(t1_m)
assert exc.value.args[0] == (
"cannot use columns with masked values as keys; "
"remove column 'a' from keys and rerun unique()")
t1_mu = table.unique(t1_m, silent=True)
assert t1_mu.masked is False
assert t1_mu.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 2 b 7.0 0',
' -- c 3.0 5']
with pytest.raises(ValueError):
t1_mu = table.unique(t1_m, silent=True, keys='a')
t1_m = operation_table_type(t, masked=True)
t1_m['a'].mask[1] = True
t1_m['d'].mask[3] = True
# Test that multiple masked key columns get removed in the correct
# order
t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True)
assert t1_mu.masked is False
assert t1_mu.pformat() == [' a b c d ',
'--- --- --- ---',
' 2 a 4.0 --',
' 2 b 7.0 0',
' -- c 3.0 5']
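# Illustrative sketch (an assumption, not part of the original suite): the
# keep='none' semantics exercised above retain only key values that occur
# exactly once, which can be mirrored with np.unique and return_counts.
def _sketch_keep_none_semantics():
    a = np.array([2, 1, 2, 2, 1, 2, 0, 1, 1])
    vals, counts = np.unique(a, return_counts=True)
    singletons = vals[counts == 1]
    assert list(singletons) == [0]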
def test_vstack_bytes(operation_table_type):
"""
Test for issue #5617 when vstack'ing bytes columns in Py3.
    This is really an upstream numpy issue numpy/numpy#8403.
"""
t = operation_table_type([[b'a']], names=['a'])
assert t['a'].itemsize == 1
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2['a'].itemsize == 1
def test_vstack_unicode():
"""
Test for problem related to issue #5617 when vstack'ing *unicode*
columns. In this case the character size gets multiplied by 4.
"""
t = table.Table([['a']], names=['a'])
assert t['a'].itemsize == 4 # 4-byte / char for U dtype
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2['a'].itemsize == 4
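# Illustrative sketch (not part of the original suite): the itemsize values
# asserted in the two tests above come straight from numpy's string dtypes,
# where 'U' (unicode) stores UCS-4 at 4 bytes per character while 'S'
# (bytes) stores 1 byte per character.
def _sketch_string_itemsize():
    assert np.array(['a']).itemsize == 4   # dtype '<U1'
    assert np.array([b'a']).itemsize == 1  # dtype 'S1'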
def test_join_mixins_time_quantity():
"""
Test for table join using non-ndarray key columns.
"""
tm1 = Time([2, 1, 2], format='cxcsec')
q1 = [2, 1, 1] * u.m
idx1 = [1, 2, 3]
tm2 = Time([2, 3], format='cxcsec')
q2 = [2, 3] * u.m
idx2 = [10, 20]
t1 = Table([tm1, q1, idx1], names=['tm', 'q', 'idx'])
t2 = Table([tm2, q2, idx2], names=['tm', 'q', 'idx'])
# Output:
#
# <Table length=4>
# tm q idx_1 idx_2
# m
# object float64 int64 int64
# ------------------ ------- ----- -----
# 0.9999999999969589 1.0 2 --
# 2.00000000000351 1.0 3 --
# 2.00000000000351 2.0 1 10
# 3.000000000000469 3.0 -- 20
t12 = table.join(t1, t2, join_type='outer', keys=['tm', 'q'])
# Key cols are lexically sorted
assert np.all(t12['tm'] == Time([1, 2, 2, 3], format='cxcsec'))
assert np.all(t12['q'] == [1, 1, 2, 3] * u.m)
assert np.all(t12['idx_1'] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1]))
assert np.all(t12['idx_2'] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0]))
def test_join_mixins_not_sortable():
"""
Test for table join using non-ndarray key columns that are not sortable.
"""
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg')
t1 = Table([sc, [1, 2]], names=['sc', 'idx1'])
t2 = Table([sc, [10, 20]], names=['sc', 'idx2'])
with pytest.raises(TypeError, match='one or more key columns are not sortable'):
table.join(t1, t2, keys='sc')
def test_join_non_1d_key_column():
c1 = [[1, 2], [3, 4]]
c2 = [1, 2]
t1 = Table([c1, c2], names=['a', 'b'])
t2 = t1.copy()
with pytest.raises(ValueError, match="key column 'a' must be 1-d"):
table.join(t1, t2, keys='a')
def test_argsort_time_column():
"""Regression test for #10823."""
times = Time(['2016-01-01', '2018-01-01', '2017-01-01'])
t = Table([times], names=['time'])
i = t.argsort('time')
assert np.all(i == times.argsort())
def test_sort_indexed_table():
"""Test fix for #9473 and #6545 - and another regression test for #10823."""
t = Table([[1, 3, 2], [6, 4, 5]], names=('a', 'b'))
t.add_index('a')
t.sort('a')
assert np.all(t['a'] == [1, 2, 3])
assert np.all(t['b'] == [6, 5, 4])
t.sort('b')
assert np.all(t['b'] == [4, 5, 6])
assert np.all(t['a'] == [3, 2, 1])
times = ['2016-01-01', '2018-01-01', '2017-01-01']
tm = Time(times)
t2 = Table([tm, [3, 2, 1]], names=['time', 'flux'])
t2.sort('flux')
assert np.all(t2['flux'] == [1, 2, 3])
t2.sort('time')
assert np.all(t2['flux'] == [3, 1, 2])
assert np.all(t2['time'] == tm[[0, 2, 1]])
# Using the table as a TimeSeries implicitly sets the index, so
# this test is a bit different from the above.
from astropy.timeseries import TimeSeries
ts = TimeSeries(time=times)
ts['flux'] = [3, 2, 1]
ts.sort('flux')
assert np.all(ts['flux'] == [1, 2, 3])
ts.sort('time')
assert np.all(ts['flux'] == [3, 1, 2])
assert np.all(ts['time'] == tm[[0, 2, 1]])
def test_get_out_class():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
assert _get_out_class([c, mc]) is mc.__class__
assert _get_out_class([mc, c]) is mc.__class__
assert _get_out_class([c, c]) is c.__class__
assert _get_out_class([c]) is c.__class__
with pytest.raises(ValueError):
_get_out_class([c, q])
with pytest.raises(ValueError):
_get_out_class([q, c])
def test_masking_required_exception():
"""
Test that outer join, hstack and vstack fail for a mixin column which
does not support masking.
"""
col = table.NdarrayMixin([0, 1, 2, 3])
t1 = table.QTable([[1, 2, 3, 4], col], names=['a', 'b'])
t2 = table.QTable([[1, 2], col[:2]], names=['a', 'c'])
with pytest.raises(NotImplementedError) as err:
table.vstack([t1, t2], join_type='outer')
assert 'vstack unavailable' in str(err.value)
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type='outer')
assert 'hstack requires masking' in str(err.value)
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type='outer')
assert 'join requires masking' in str(err.value)
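# Illustrative sketch (an assumption, not part of the original suite): the
# failures above stem from NdarrayMixin being a plain ndarray subclass with
# no masked analog, so there is no way to represent the missing entries an
# outer operation would need to insert.
def _sketch_ndarray_mixin_has_no_mask():
    col = table.NdarrayMixin([0, 1, 2])
    assert isinstance(col, np.ndarray)
    assert not hasattr(col, 'mask')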
def test_stack_columns():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
time = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
sc = SkyCoord([1, 2], [3, 4], unit='deg')
cq = table.Column([11, 22], unit=u.m)
t = table.hstack([c, q])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([q, c])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([mc, q])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([c, mc])
assert t.__class__ is table.Table
assert t.masked is False
t = table.vstack([q, q])
assert t.__class__ is table.QTable
t = table.vstack([c, c])
assert t.__class__ is table.Table
t = table.hstack([c, time])
assert t.__class__ is table.Table
t = table.hstack([c, sc])
assert t.__class__ is table.Table
t = table.hstack([q, time, sc])
assert t.__class__ is table.QTable
with pytest.raises(ValueError):
table.vstack([c, q])
with pytest.raises(ValueError):
t = table.vstack([q, cq])
def test_mixin_join_regression():
# This used to trigger a ValueError:
# ValueError: NumPy boolean array indexing assignment cannot assign
# 6 input values to the 4 output values where the mask is true
t1 = QTable()
t1['index'] = [1, 2, 3, 4, 5]
t1['flux1'] = [2, 3, 2, 1, 1] * u.Jy
t1['flux2'] = [2, 3, 2, 1, 1] * u.Jy
t2 = QTable()
t2['index'] = [3, 4, 5, 6]
t2['flux1'] = [2, 1, 1, 3] * u.Jy
t2['flux2'] = [2, 1, 1, 3] * u.Jy
t12 = table.join(t1, t2, keys=('index', 'flux1', 'flux2'), join_type='outer')
assert len(t12) == 6
|
from astropy.utils.tests.test_metadata import MetaBaseTest
import operator
import warnings
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.tests.helper import assert_follows_unicode_guidelines
from astropy import table
from astropy import time
from astropy import units as u
class TestColumn():
def test_subclass(self, Column):
c = Column(name='a')
assert isinstance(c, np.ndarray)
c2 = c * 2
assert isinstance(c2, Column)
assert isinstance(c2, np.ndarray)
def test_numpy_ops(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
for op, test_equal in ((operator.eq, True),
(operator.ne, False),
(operator.ge, True),
(operator.gt, False),
(operator.le, True),
(operator.lt, False)):
for eq in (op(c, arr), op(arr, c)):
assert np.all(eq) if test_equal else not np.any(eq)
assert len(eq) == 3
if Column is table.Column:
assert type(eq) == np.ndarray
else:
assert type(eq) == np.ma.core.MaskedArray
assert eq.dtype.str == '|b1'
lt = c - 1 < arr
assert np.all(lt)
def test_numpy_boolean_ufuncs(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
for ufunc, test_true in ((np.isfinite, True),
(np.isinf, False),
(np.isnan, False),
(np.sign, True),
(np.signbit, False)):
result = ufunc(c)
assert len(result) == len(c)
assert np.all(result) if test_true else not np.any(result)
if Column is table.Column:
assert type(result) == np.ndarray
else:
assert type(result) == np.ma.core.MaskedArray
if ufunc is not np.sign:
assert result.dtype.str == '|b1'
def test_view(self, Column):
c = np.array([1, 2, 3], dtype=np.int64).view(Column)
assert repr(c) == f"<{Column.__name__} dtype='int64' length=3>\n1\n2\n3"
def test_format(self, Column):
"""Show that the formatted output from str() works"""
from astropy import conf
with conf.set_temp('max_lines', 8):
c1 = Column(np.arange(2000), name='a', dtype=float,
format='%6.2f')
assert str(c1).splitlines() == [' a ',
'-------',
' 0.00',
' 1.00',
' ...',
'1998.00',
'1999.00',
'Length = 2000 rows']
def test_convert_numpy_array(self, Column):
d = Column([1, 2, 3], name='a', dtype='i8')
np_data = np.array(d)
assert np.all(np_data == d)
np_data = np.array(d, copy=False)
assert np.all(np_data == d)
np_data = np.array(d, dtype='i4')
assert np.all(np_data == d)
def test_convert_unit(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
d.convert_unit_to("km")
assert np.all(d.data == [0.001, 0.002, 0.003])
def test_array_wrap(self):
"""Test that the __array_wrap__ method converts a reduction ufunc
output that has a different shape into an ndarray view. Without this a
method call like c.mean() returns a Column array object with length=1."""
# Mean and sum for a 1-d float column
c = table.Column(name='a', data=[1., 2., 3.])
assert np.allclose(c.mean(), 2.0)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 6.)
assert isinstance(c.sum(), (np.floating, float))
# Non-reduction ufunc preserves Column class
assert isinstance(np.cos(c), table.Column)
# Sum for a 1-d int column
c = table.Column(name='a', data=[1, 2, 3])
assert np.allclose(c.sum(), 6)
assert isinstance(c.sum(), (np.integer, int))
# Sum for a 2-d int column
c = table.Column(name='a', data=[[1, 2, 3],
[4, 5, 6]])
assert c.sum() == 21
assert isinstance(c.sum(), (np.integer, int))
assert np.all(c.sum(axis=0) == [5, 7, 9])
assert c.sum(axis=0).shape == (3,)
assert isinstance(c.sum(axis=0), np.ndarray)
# Sum and mean for a 1-d masked column
c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1])
assert np.allclose(c.mean(), 1.5)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 3.)
assert isinstance(c.sum(), (np.floating, float))
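    # Illustrative sketch (an assumption, not in the original suite): other
    # reduction ufuncs funnel through the same __array_wrap__ path, so
    # np.add.reduce should likewise give back a plain scalar rather than a
    # length-1 Column.
    def test_array_wrap_reduce_sketch(self):
        c = table.Column(name='a', data=[1., 2., 3.])
        total = np.add.reduce(c)
        assert not isinstance(total, table.Column)
        assert np.allclose(total, 6.)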
def test_name_none(self, Column):
"""Can create a column without supplying name, which defaults to None"""
c = Column([1, 2])
assert c.name is None
assert np.all(c == np.array([1, 2]))
def test_quantity_init(self, Column):
c = Column(data=np.array([1, 2, 3]) * u.m)
assert np.all(c.data == np.array([1, 2, 3]))
assert np.all(c.unit == u.m)
c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm)
assert np.all(c.data == np.array([100, 200, 300]))
assert np.all(c.unit == u.cm)
def test_quantity_comparison(self, Column):
# regression test for gh-6532
c = Column([1, 2100, 3], unit='Hz')
q = 2 * u.kHz
check = c < q
assert np.all(check == [True, False, True])
# This already worked, but just in case.
check = q >= c
assert np.all(check == [True, False, True])
def test_attrs_survive_getitem_after_change(self, Column):
"""
Test for issue #3023: when calling getitem with a MaskedArray subclass
the original object attributes are not copied.
"""
c1 = Column([1, 2, 3], name='a', unit='m', format='%i',
description='aa', meta={'a': 1})
c1.name = 'b'
c1.unit = 'km'
c1.format = '%d'
c1.description = 'bb'
c1.meta = {'bbb': 2}
for item in (slice(None, None), slice(None, 1), np.array([0, 2]),
np.array([False, True, False])):
c2 = c1[item]
assert c2.name == 'b'
assert c2.unit is u.km
assert c2.format == '%d'
assert c2.description == 'bb'
assert c2.meta == {'bbb': 2}
# Make sure that calling getitem resulting in a scalar does
# not copy attributes.
val = c1[1]
for attr in ('name', 'unit', 'format', 'description', 'meta'):
assert not hasattr(val, attr)
def test_to_quantity(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
assert np.all(d.quantity == ([1, 2, 3.] * u.m))
assert np.all(d.quantity.value == ([1, 2, 3.] * u.m).value)
assert np.all(d.quantity == d.to('m'))
assert np.all(d.quantity.value == d.to('m').value)
np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value,
[299.792458, 149.896229, 99.93081933])
d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None)
with pytest.raises(u.UnitsError):
d_nounit.to(u.km)
assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))
# make sure the correct copy/no copy behavior is happening
q = [1, 3, 5] * u.km
# to should always make a copy
d.to(u.km)[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# explicit copying of the quantity should not change the column
d.quantity.copy()[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# but quantity directly is a "view", accessing the underlying column
d.quantity[:] = q
np.testing.assert_allclose(d, [1000, 3000, 5000])
# view should also work for integers
d2 = Column([1, 2, 3], name='a', dtype=int, unit="m")
d2.quantity[:] = q
np.testing.assert_allclose(d2, [1000, 3000, 5000])
# but it should fail for strings or other non-numeric tables
d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m")
with pytest.raises(TypeError):
d3.quantity
def test_to_funcunit_quantity(self, Column):
"""
        Tests for #8424: check that a function unit can be retrieved from a column.
"""
d = Column([1, 2, 3], name='a', dtype="f8", unit="dex(AA)")
assert np.all(d.quantity == ([1, 2, 3] * u.dex(u.AA)))
assert np.all(d.quantity.value == ([1, 2, 3] * u.dex(u.AA)).value)
assert np.all(d.quantity == d.to("dex(AA)"))
assert np.all(d.quantity.value == d.to("dex(AA)").value)
        # make sure casting to a linear unit works
q = [10, 100, 1000] * u.AA
np.testing.assert_allclose(d.to(u.AA), q)
def test_item_access_type(self, Column):
"""
Tests for #3095, which forces integer item access to always return a plain
ndarray or MaskedArray, even in the case of a multi-dim column.
"""
integer_types = (int, np.int_)
for int_type in integer_types:
c = Column([[1, 2], [3, 4]])
i0 = int_type(0)
i1 = int_type(1)
assert np.all(c[i0] == [1, 2])
assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray)
assert c[i0].shape == (2,)
c01 = c[i0:i1]
assert np.all(c01 == [[1, 2]])
assert isinstance(c01, Column)
assert c01.shape == (1, 2)
c = Column([1, 2])
assert np.all(c[i0] == 1)
assert isinstance(c[i0], np.integer)
assert c[i0].shape == ()
c01 = c[i0:i1]
assert np.all(c01 == [1])
assert isinstance(c01, Column)
assert c01.shape == (1,)
def test_insert_basic(self, Column):
c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1 == [0, 100, 1, 2])
assert c1.attrs_equal(c)
assert type(c) is type(c1)
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
c1 = c.insert(-1, 100)
assert np.all(c1 == [0, 1, 100, 2])
c1 = c.insert(3, 100)
assert np.all(c1 == [0, 1, 2, 100])
c1 = c.insert(-3, 100)
assert np.all(c1 == [100, 0, 1, 2])
c1 = c.insert(1, [100, 200, 300])
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
# Out of bounds index
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(-4, 100)
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(4, 100)
def test_insert_axis(self, Column):
"""Insert with non-default axis kwarg"""
c = Column([[1, 2], [3, 4]])
c1 = c.insert(1, [5, 6], axis=None)
assert np.all(c1 == [1, 5, 6, 2, 3, 4])
c1 = c.insert(1, [5, 6], axis=1)
assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])
def test_insert_string_expand(self, Column):
c = Column(['a', 'b'])
c1 = c.insert(0, 'abc')
assert np.all(c1 == ['abc', 'a', 'b'])
c = Column(['a', 'b'])
c1 = c.insert(0, ['c', 'def'])
assert np.all(c1 == ['c', 'def', 'a', 'b'])
def test_insert_string_masked_values(self):
c = table.MaskedColumn(['a', 'b'])
c1 = c.insert(0, np.ma.masked)
assert np.all(c1 == ['', 'a', 'b'])
assert np.all(c1.mask == [True, False, False])
assert c1.dtype == 'U1'
c2 = c.insert(1, np.ma.MaskedArray(['ccc', 'dd'], mask=[True, False]))
assert np.all(c2 == ['a', 'ccc', 'dd', 'b'])
assert np.all(c2.mask == [False, True, False, False])
assert c2.dtype == 'U3'
def test_insert_string_type_error(self, Column):
c = Column([1, 2])
with pytest.raises(ValueError, match='invalid literal for int'):
c.insert(0, 'string')
c = Column(['a', 'b'])
with pytest.raises(TypeError, match='string operation on non-string array'):
c.insert(0, 1)
def test_insert_multidim(self, Column):
c = Column([[1, 2],
[3, 4]], name='a', dtype=int)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == [[1, 2], [100, 200], [3, 4]])
# Broadcast
c1 = c.insert(1, 100)
assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])
# Wrong shape
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200, 300])
def test_insert_object(self, Column):
c = Column(['a', 1, None], name='a', dtype=object)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == np.array(['a', [100, 200], 1, None],
dtype=object))
def test_insert_masked(self):
c = table.MaskedColumn([0, 1, 2], name='a', fill_value=9999,
mask=[False, True, False])
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert c1.fill_value == 9999
assert np.all(c1.data.mask == [False, False, True, False])
assert type(c) is type(c1)
for mask in (False, True):
c1 = c.insert(1, 100, mask=mask)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert np.all(c1.data.mask == [False, mask, True, False])
def test_masked_multidim_as_list(self):
data = np.ma.MaskedArray([1, 2], mask=[True, False])
c = table.MaskedColumn([data])
assert c.shape == (1, 2)
assert np.all(c[0].mask == [True, False])
def test_insert_masked_multidim(self):
c = table.MaskedColumn([[1, 2],
[3, 4]], name='a', dtype=int)
c1 = c.insert(1, [100, 200], mask=True)
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])
c1 = c.insert(1, [100, 200], mask=[True, False])
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200], mask=[True, False, True])
def test_mask_on_non_masked_table(self):
"""
        When the table is not masked, trying to set a mask on a column
        raises AttributeError.
"""
t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))
with pytest.raises(AttributeError):
t['a'].mask = [True, False]
class TestAttrEqual():
"""Bunch of tests originally from ATpy that test the attrs_equal method."""
def test_5(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy')
c2 = Column(name='a', dtype=int, unit='mJy')
assert c1.attrs_equal(c2)
def test_6(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
def test_7(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='b', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_8(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=float, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_9(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_10(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%g',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_11(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='another test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_12(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'e': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_13(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 9, 'd': 12})
assert not c1.attrs_equal(c2)
def test_col_and_masked_col(self):
c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
assert c2.attrs_equal(c1)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaColumn(MetaBaseTest):
test_class = table.Column
args = ()
class TestMetaMaskedColumn(MetaBaseTest):
test_class = table.MaskedColumn
args = ()
def test_getitem_metadata_regression():
"""
Regression test for #1471: MaskedArray does not call __array_finalize__ so
the meta-data was not getting copied over. By overloading _update_from we
are able to work around this bug.
"""
# Make sure that meta-data gets propagated with __getitem__
c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
c = table.MaskedColumn(data=[1, 2], name='a', description='b',
unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
# As above, but with take() - check the method and the function
c = table.Column(data=[1, 2, 3], name='a', description='b',
unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.Column)
c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b',
unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.MaskedColumn)
def test_unicode_guidelines():
arr = np.array([1, 2, 3])
c = table.Column(arr, name='a')
assert_follows_unicode_guidelines(c)
def test_scalar_column():
"""
Column is not designed to hold scalars, but for numpy 1.6 this can happen:
>> type(np.std(table.Column([1, 2])))
astropy.table.column.Column
"""
c = table.Column(1.5)
assert repr(c) == '1.5'
assert str(c) == '1.5'
def test_qtable_column_conversion():
"""
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
"""
qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])
assert isinstance(qtab['i'], table.column.Column)
assert isinstance(qtab['f'], table.column.Column)
qtab['i'].unit = 'km/s'
assert isinstance(qtab['i'], u.Quantity)
assert isinstance(qtab['f'], table.column.Column)
# should follow from the above, but good to make sure as a #4497 regression test
assert isinstance(qtab['i'][0], u.Quantity)
assert isinstance(qtab[0]['i'], u.Quantity)
assert not isinstance(qtab['f'][0], u.Quantity)
assert not isinstance(qtab[0]['f'], u.Quantity)
# Regression test for #5342: if a function unit is assigned, the column
# should become the appropriate FunctionQuantity subclass.
qtab['f'].unit = u.dex(u.cm / u.s**2)
assert isinstance(qtab['f'], u.Dex)
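# Illustrative sketch (an assumption, not part of the original suite): the
# Quantity conversion above is specific to QTable; in a plain Table,
# assigning a unit only annotates the Column and does not change its class.
def _sketch_plain_table_unit_assignment():
    tab = table.Table([[1, 2]], names=['i'])
    tab['i'].unit = 'km/s'
    assert isinstance(tab['i'], table.column.Column)
    assert not isinstance(tab['i'], u.Quantity)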
@pytest.mark.parametrize('masked', [True, False])
def test_string_truncation_warning(masked):
"""
Test warnings associated with in-place assignment to a string
column that results in truncation of the right hand side.
"""
from inspect import currentframe, getframeinfo
t = table.Table([['aa', 'bb']], names=['a'], masked=masked)
t['a'][1] = 'cc'
t['a'][:] = 'dd'
with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
r'string\(s\) longer than 2 character\(s\)') as w:
frameinfo = getframeinfo(currentframe())
t['a'][0] = 'eee' # replace item with string that gets truncated
assert t['a'][0] == 'ee'
assert len(w) == 1
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert 'test_column' in w[0].filename
with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
r'string\(s\) longer than 2 character\(s\)') as w:
t['a'][:] = ['ff', 'ggg'] # replace item with string that gets truncated
assert np.all(t['a'] == ['ff', 'gg'])
assert len(w) == 1
# Test the obscure case of assigning from an array that was originally
# wider than any of the current elements (i.e. dtype is U4 but actual
# elements are U1 at the time of assignment).
val = np.array(['ffff', 'gggg'])
val[:] = ['f', 'g']
t['a'][:] = val
assert np.all(t['a'] == ['f', 'g'])
def test_string_truncation_warning_masked():
"""
Test warnings associated with in-place assignment to a string
to a masked column, specifically where the right hand side
contains np.ma.masked.
"""
# Test for strings, but also cover assignment of np.ma.masked to
# int and float masked column setting. This was previously only
# covered in an unrelated io.ascii test (test_line_endings) which
# showed an unexpected difference between handling of str and numeric
# masked arrays.
for values in (['a', 'b'], [1, 2], [1.0, 2.0]):
mc = table.MaskedColumn(values)
mc[1] = np.ma.masked
assert np.all(mc.mask == [False, True])
mc[:] = np.ma.masked
assert np.all(mc.mask == [True, True])
mc = table.MaskedColumn(['aa', 'bb'])
with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
r'string\(s\) longer than 2 character\(s\)') as w:
mc[:] = [np.ma.masked, 'ggg'] # replace item with string that gets truncated
assert mc[1] == 'gg'
assert np.all(mc.mask == [True, False])
assert len(w) == 1
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
"""
Create a bytestring Column from strings (including unicode) in Py3.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = 'bä'
c = Column([uba, 'def'], dtype='S')
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes_obj(Column):
"""
    Create a Column of dtype object with a bytestring in it and make sure
    it keeps the bytestring and does not convert to str when accessed.
"""
c = Column([None, b'def'])
assert c.dtype.char == 'O'
assert not c[0]
assert c[1] == b'def'
assert isinstance(c[1], bytes)
assert not isinstance(c[1], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([None, b'def']))
assert not np.all(c[:2] == np.array([None, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes(Column):
"""
Create a bytestring Column from bytes and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = 'bä'
uba8 = uba.encode('utf-8')
c = Column([uba8, b'def'])
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'S'
# Array / list comparisons
assert np.all(c == [uba, 'def'])
ok = c == [uba8, b'def']
assert type(ok) is type(c.data) # noqa
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c == np.array([uba, 'def']))
assert np.all(c == np.array([uba8, b'def']))
# Scalar compare
cmps = (uba, uba8)
for cmp in cmps:
ok = c == cmp
assert type(ok) is type(c.data) # noqa
assert np.all(ok == [True, False])
def test_col_unicode_sandwich_unicode():
"""
Sanity check that Unicode Column behaves normally.
"""
uba = 'bä'
uba8 = uba.encode('utf-8')
c = table.Column([uba, 'def'], dtype='U')
assert c[0] == uba
assert isinstance(c[:0], table.Column)
assert isinstance(c[0], str)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'U'
ok = c == [uba, 'def']
assert type(ok) == np.ndarray
assert ok.dtype.char == '?'
assert np.all(ok)
with warnings.catch_warnings():
# Ignore the FutureWarning in numpy >=1.24 (it is OK).
warnings.filterwarnings('ignore', message='.*elementwise comparison failed.*')
assert np.all(c != [uba8, b'def'])
def test_masked_col_unicode_sandwich():
"""
Create a bytestring MaskedColumn and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
c = table.MaskedColumn([b'abc', b'def'])
c[1] = np.ma.masked
assert isinstance(c[:0], table.MaskedColumn)
assert isinstance(c[0], str)
assert c[0] == 'abc'
assert c[1] is np.ma.masked
assert isinstance(c[:], table.MaskedColumn)
assert c[:].dtype.char == 'S'
ok = c == ['abc', 'def']
assert ok[0] == True # noqa
assert ok[1] is np.ma.masked
assert np.all(c == [b'abc', b'def'])
assert np.all(c == np.array(['abc', 'def']))
assert np.all(c == np.array([b'abc', b'def']))
for cmp in ('abc', b'abc'):
ok = c == cmp
assert type(ok) is np.ma.MaskedArray
assert ok[0] == True # noqa
assert ok[1] is np.ma.masked
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_unicode_sandwich_set(Column):
"""
    Test setting items in a bytestring Column.
"""
uba = 'bä'
c = Column([b'abc', b'def'])
c[0] = b'aa'
assert np.all(c == ['aa', 'def'])
c[0] = uba # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding
assert np.all(c == [uba, 'def'])
assert c.pformat() == ['None', '----', ' ' + uba, ' def']
c[:] = b'cc'
assert np.all(c == ['cc', 'cc'])
c[:] = uba
assert np.all(c == [uba, uba])
c[:] = ''
c[:] = [uba, b'def']
assert np.all(c == [uba, b'def'])
@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column])
@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list])
def test_unicode_sandwich_compare(class1, class2):
"""Test that comparing a bytestring Column/MaskedColumn with various
str (unicode) object types gives the expected result. Tests #6838.
"""
obj1 = class1([b'a', b'c'])
if class2 is str:
obj2 = 'a'
elif class2 is list:
obj2 = ['a', 'b']
else:
obj2 = class2(['a', 'b'])
assert np.all((obj1 == obj2) == [True, False])
assert np.all((obj2 == obj1) == [True, False])
assert np.all((obj1 != obj2) == [False, True])
assert np.all((obj2 != obj1) == [False, True])
assert np.all((obj1 > obj2) == [False, True])
assert np.all((obj2 > obj1) == [False, False])
assert np.all((obj1 <= obj2) == [True, False])
assert np.all((obj2 <= obj1) == [True, True])
assert np.all((obj1 < obj2) == [False, False])
assert np.all((obj2 < obj1) == [False, True])
assert np.all((obj1 >= obj2) == [True, True])
assert np.all((obj2 >= obj1) == [True, False])
def test_unicode_sandwich_masked_compare():
"""Test the fix for #6839 from #6899."""
c1 = table.MaskedColumn(['a', 'b', 'c', 'd'],
mask=[True, False, True, False])
c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'],
mask=[True, True, False, False])
for cmp in ((c1 == c2), (c2 == c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert cmp[3]
for cmp in ((c1 != c2), (c2 != c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert not cmp[3]
# Note: comparisons <, >, >=, <= fail to return a masked array entirely,
# see https://github.com/numpy/numpy/issues/10092.
def test_structured_masked_column_roundtrip():
mc = table.MaskedColumn([(1., 2.), (3., 4.)],
mask=[(False, False), (False, False)], dtype='f8,f8')
assert len(mc.dtype.fields) == 2
mc2 = table.MaskedColumn(mc)
assert_array_equal(mc2, mc)
@pytest.mark.parametrize('dtype', ['i4,f4', 'f4,(2,)f8'])
def test_structured_empty_column_init(dtype):
dtype = np.dtype(dtype)
c = table.Column(length=5, shape=(2,), dtype=dtype)
assert c.shape == (5, 2)
assert c.dtype == dtype
def test_column_value_access():
"""Can a column's underlying data consistently be accessed via `.value`,
whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?"""
data = np.array([1, 2, 3])
tbl = table.QTable({'a': table.Column(data),
'b': table.MaskedColumn(data),
'c': u.Quantity(data),
'd': time.Time(data, format='mjd')})
assert type(tbl['a'].value) == np.ndarray
assert type(tbl['b'].value) == np.ma.MaskedArray
assert type(tbl['c'].value) == np.ndarray
assert type(tbl['d'].value) == np.ndarray
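# Illustrative sketch (an assumption, not part of the original suite): for
# the Time column above, .value reflects the current format, so switching
# the format changes what .value returns.
def _sketch_time_value_follows_format():
    t = time.Time([1., 2.], format='mjd')
    assert np.all(t.value == [1., 2.])
    t.format = 'jd'  # JD = MJD + 2400000.5
    assert np.all(t.value == [2400001.5, 2400002.5])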
def test_masked_column_serialize_method_propagation():
mc = table.MaskedColumn([1., 2., 3.], mask=[True, False, True])
assert mc.info.serialize_method['ecsv'] == 'null_value'
mc.info.serialize_method['ecsv'] = 'data_mask'
assert mc.info.serialize_method['ecsv'] == 'data_mask'
mc2 = mc.copy()
assert mc2.info.serialize_method['ecsv'] == 'data_mask'
mc3 = table.MaskedColumn(mc)
assert mc3.info.serialize_method['ecsv'] == 'data_mask'
mc4 = mc.view(table.MaskedColumn)
assert mc4.info.serialize_method['ecsv'] == 'data_mask'
mc5 = mc[1:]
assert mc5.info.serialize_method['ecsv'] == 'data_mask'
@pytest.mark.parametrize('dtype', ['S', 'U', 'i'])
def test_searchsorted(Column, dtype):
c = Column([1, 2, 2, 3], dtype=dtype)
    if Column is table.MaskedColumn:
# Searchsorted seems to ignore the mask
c[2] = np.ma.masked
if dtype == 'i':
vs = (2, [2, 1])
else:
vs = ('2', ['2', '1'], b'2', [b'2', b'1'])
for v in vs:
v = np.array(v, dtype=dtype)
exp = np.searchsorted(c.data, v, side='right')
res = c.searchsorted(v, side='right')
assert np.all(res == exp)
res = np.searchsorted(c, v, side='right')
assert np.all(res == exp)
|
c7719415693adf869cf20249c8a04b5c428e803f6cfdf7ce3fb26bdc76258c12 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pickle
from io import StringIO
import pytest
import numpy as np
from astropy.table.serialize import represent_mixins_as_columns
from astropy.utils.data_info import ParentDtypeInfo
from astropy.table.table_helpers import ArrayWrapper
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin
from astropy.table import serialize
from astropy import time
from astropy import coordinates
from astropy import units as u
from astropy.table.column import BaseColumn
from astropy.table import table_helpers
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.metadata import MergeConflictWarning
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.coordinates.tests.helper import skycoord_equal
from .conftest import MIXIN_COLS
def test_attributes(mixin_cols):
"""
Required attributes for a column can be set.
"""
m = mixin_cols['m']
m.info.name = 'a'
assert m.info.name == 'a'
m.info.description = 'a'
assert m.info.description == 'a'
# Cannot set unit for these classes
if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time, time.TimeDelta,
coordinates.BaseRepresentationOrDifferential)):
with pytest.raises(AttributeError):
m.info.unit = u.m
else:
m.info.unit = u.m
assert m.info.unit is u.m
m.info.format = 'a'
assert m.info.format == 'a'
m.info.meta = {'a': 1}
assert m.info.meta == {'a': 1}
with pytest.raises(AttributeError):
m.info.bad_attr = 1
with pytest.raises(AttributeError):
m.info.bad_attr
def check_mixin_type(table, table_col, in_col):
# We check for QuantityInfo rather than just isinstance(col, u.Quantity)
# since we want to treat EarthLocation as a mixin, even though it is
# a Quantity subclass.
if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable)
or isinstance(in_col, Column)):
assert type(table_col) is table.ColumnClass
else:
assert type(table_col) is type(in_col)
# Make sure in_col got copied and creating table did not touch it
assert in_col.info.name is None
def test_make_table(table_types, mixin_cols):
"""
Make a table with the columns in mixin_cols, which is an ordered dict of
three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin.
"""
t = table_types.Table(mixin_cols)
check_mixin_type(t, t['m'], mixin_cols['m'])
cols = list(mixin_cols.values())
t = table_types.Table(cols, names=('i', 'a', 'b', 'm'))
check_mixin_type(t, t['m'], mixin_cols['m'])
t = table_types.Table(cols)
check_mixin_type(t, t['col3'], mixin_cols['m'])
def test_io_ascii_write():
"""
Test that table with mixin column can be written by io.ascii for
every pure Python writer. No validation of the output is done,
this just confirms no exceptions.
"""
from astropy.io.ascii.connect import _get_connectors_table
t = QTable(MIXIN_COLS)
for fmt in _get_connectors_table():
if fmt['Write'] and '.fast_' not in fmt['Format']:
out = StringIO()
t.write(out, format=fmt['Format'])
def test_votable_quantity_write(tmpdir):
"""
Test that table with Quantity mixin column can be round-tripped by
io.votable. Note that FITS and HDF5 mixin support are tested (much more
thoroughly) in their respective subpackage tests
(io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
"""
t = QTable()
t['a'] = u.Quantity([1, 2, 4], unit='nm')
filename = str(tmpdir.join('table-tmp'))
t.write(filename, format='votable', overwrite=True)
qt = QTable.read(filename, format='votable')
assert isinstance(qt['a'], u.Quantity)
assert qt['a'].unit == 'nm'
@pytest.mark.remote_data
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_standard(tmpdir, table_types):
"""
Test that table with Time mixin columns can be written by io.fits.
Validation of the output is done. Test that io.fits writes a table
containing Time mixin columns that can be partially round-tripped
(metadata scale, location).
Note that we postpone checking the "local" scale, since that cannot
be done with format 'cxcsec', as it requires an epoch.
"""
t = table_types([[1, 2], ['string', 'column']])
for scale in time.STANDARD_TIME_SCALES:
t['a' + scale] = time.Time([[1, 2], [3, 4]], format='cxcsec',
scale=scale, location=EarthLocation(
-2446354, 4237210, 4077985, unit='m'))
t['b' + scale] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale=scale)
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
with pytest.warns(
AstropyUserWarning,
match='Time Column "btai" has no specified location, '
'but global Time Position is present'):
t.write(filename, format='fits', overwrite=True)
with pytest.warns(
AstropyUserWarning,
match='Time column reference position "TRPOSn" is not specified'):
tm = table_types.read(filename, format='fits', astropy_native=True)
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_local(tmpdir, table_types):
"""
Test that table with a Time mixin with scale local can also be written
by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding
``cxcsec`` format, which requires an epoch and thus cannot be used for a
local time scale.
"""
t = table_types([[1, 2], ['string', 'column']])
t['a_local'] = time.Time([[50001, 50002], [50003, 50004]],
format='mjd', scale='local',
location=EarthLocation(-2446354, 4237210, 4077985,
unit='m'))
t['b_local'] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale='local')
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
with pytest.warns(AstropyUserWarning,
match='Time Column "b_local" has no specified location'):
t.write(filename, format='fits', overwrite=True)
with pytest.warns(AstropyUserWarning,
match='Time column reference position "TRPOSn" is not specified.'):
tm = table_types.read(filename, format='fits', astropy_native=True)
for ab in ('a', 'b'):
name = ab + '_local'
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format.
for ab in ('a', 'b'):
name = ab + '_local'
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for ab in ('a', 'b'):
name = ab + '_local'
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
def test_votable_mixin_write_fail(mixin_cols):
"""
Test that table with mixin columns (excluding Quantity) cannot be written by
io.votable.
"""
t = QTable(mixin_cols)
# Only do this test if there are unsupported column types (i.e. anything besides
# BaseColumn and Quantity class instances).
unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity))
if not unsupported_cols:
pytest.skip("no unsupported column types")
out = StringIO()
with pytest.raises(ValueError) as err:
t.write(out, format='votable')
assert 'cannot write table with mixin column(s)' in str(err.value)
def test_join(table_types):
"""
Join tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['a'] = table_types.Column(['a', 'b', 'b', 'c'])
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t2 = table_types.Table(t1)
t2['a'] = ['b', 'c', 'a', 'd']
for name, col in MIXIN_COLS.items():
t1[name].info.description = name
t2[name].info.description = name + '2'
for join_type in ('inner', 'left'):
t12 = join(t1, t2, keys='a', join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
assert t12[name1].info.description == name
assert t12[name2].info.description == name + '2'
for join_type in ('outer', 'right'):
with pytest.raises(NotImplementedError) as exc:
t12 = join(t1, t2, keys='a', join_type=join_type)
assert 'join requires masking column' in str(exc.value)
with pytest.raises(TypeError) as exc:
t12 = join(t1, t2, keys=['a', 'skycoord'])
assert 'one or more key columns are not sortable' in str(exc.value)
# Join does work for a mixin which is a subclass of np.ndarray
with pytest.warns(MergeConflictWarning,
match="In merged column 'quantity' the 'description' "
"attribute does not match"):
t12 = join(t1, t2, keys=['quantity'])
assert np.all(t12['a_1'] == t1['a'])
def test_hstack(table_types):
"""
Hstack tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t1[name].info.description = name
t1[name].info.meta = {'a': 1}
for join_type in ('inner', 'outer'):
for chop in (True, False):
t2 = table_types.Table(t1)
if chop:
t2 = t2[:-1]
if join_type == 'outer':
with pytest.raises(NotImplementedError) as exc:
t12 = hstack([t1, t2], join_type=join_type)
assert 'hstack requires masking column' in str(exc.value)
continue
t12 = hstack([t1, t2], join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
for attr in ('description', 'meta'):
assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr)
assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr)
def assert_table_name_col_equal(t, name, col):
"""
Assert all(t[name] == col), with special handling for known mixin cols.
"""
if isinstance(col, coordinates.SkyCoord):
assert np.all(t[name].ra == col.ra)
assert np.all(t[name].dec == col.dec)
elif isinstance(col, coordinates.BaseRepresentationOrDifferential):
assert np.all(representation_equal(t[name], col))
elif isinstance(col, u.Quantity):
if type(t) is QTable:
assert np.all(t[name] == col)
elif isinstance(col, table_helpers.ArrayWrapper):
assert np.all(t[name].data == col.data)
else:
assert np.all(t[name] == col)
def test_get_items(mixin_cols):
"""
Test that slicing / indexing table gives right values and col attrs inherit
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
for item in ([1, 3], np.array([0, 2]), slice(1, 3)):
t2 = t[item]
m2 = m[item]
assert_table_name_col_equal(t2, 'm', m[item])
for attr in attrs:
assert getattr(t2['m'].info, attr) == getattr(m.info, attr)
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_info_preserved_pickle_copy_init(mixin_cols):
"""
Test copy, pickle, and init from class roundtrip preserve info. This
tests not only the mixin classes but a regular column as well.
"""
def pickle_roundtrip(c):
return pickle.loads(pickle.dumps(c))
def init_from_class(c):
return c.__class__(c)
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
for colname in ('i', 'm'):
m = mixin_cols[colname]
m.info.name = colname
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
m2 = func(m)
for attr in attrs:
# non-native byteorder not preserved by last 2 func, _except_ for structured dtype
if (attr != 'dtype'
or getattr(m.info.dtype, 'isnative', True)
or m.info.dtype.name.startswith('void')
or func in (copy.copy, copy.deepcopy)):
original = getattr(m.info, attr)
else:
# func does not preserve byteorder, check against (native) type.
original = m.info.dtype.newbyteorder('=')
assert getattr(m2.info, attr) == original
def test_add_column(mixin_cols):
"""
Test that adding a column preserves values and attributes
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
assert m.info.name is None
# Make sure adding column in various ways doesn't touch
t = QTable([m], names=['a'])
assert m.info.name is None
t['new'] = m
assert m.info.name is None
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
# Add columns m2, m3, m4 by two different methods and test expected equality
t['m2'] = m
m.info.name = 'm3'
t.add_columns([m], copy=True)
m.info.name = 'm4'
t.add_columns([m], copy=False)
for name in ('m2', 'm3', 'm4'):
assert_table_name_col_equal(t, name, m)
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t[name].info, attr)
# Also check that one can set using a scalar.
s = m[0]
if type(s) is type(m) and 'info' in s.__dict__:
# We're not going to worry about testing classes for which scalars
# are a different class than the real array, or where info is not copied.
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
    # While we're at it, also check a length-1 table.
t = QTable([m[1:2]], names=['m'])
if type(s) is type(m) and 'info' in s.__dict__:
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
def test_vstack():
"""
Vstack tables with mixin cols.
"""
t1 = QTable(MIXIN_COLS)
t2 = QTable(MIXIN_COLS)
with pytest.raises(NotImplementedError):
vstack([t1, t2])
def test_insert_row(mixin_cols):
"""
Test inserting a row, which works for Column, Quantity, Time and SkyCoord.
"""
t = QTable(mixin_cols)
t0 = t.copy()
t['m'].info.description = 'd'
idxs = [0, -1, 1, 2, 3]
if isinstance(t['m'], (u.Quantity, Column, time.Time, time.TimeDelta, coordinates.SkyCoord)):
t.insert_row(1, t[-1])
for name in t.colnames:
col = t[name]
if isinstance(col, coordinates.SkyCoord):
assert skycoord_equal(col, t0[name][idxs])
else:
assert np.all(col == t0[name][idxs])
assert t['m'].info.description == 'd'
else:
with pytest.raises(ValueError) as exc:
t.insert_row(1, t[-1])
assert "Unable to insert row" in str(exc.value)
def test_insert_row_bad_unit():
"""
Insert a row into a QTable with the wrong unit
"""
t = QTable([[1] * u.m])
with pytest.raises(ValueError) as exc:
t.insert_row(0, (2 * u.m / u.s,))
assert "'m / s' (speed/velocity) and 'm' (length) are not convertible" in str(exc.value)
def test_convert_np_array(mixin_cols):
"""
Test that converting to numpy array creates an object dtype and that
each instance in the array has the expected type.
"""
t = QTable(mixin_cols)
ta = t.as_array()
m = mixin_cols['m']
dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O'
assert ta['m'].dtype.kind == dtype_kind
def test_assignment_and_copy():
"""
Test that assignment of an int, slice, and fancy index works.
Along the way test that copying table works.
"""
for name in ('quantity', 'arraywrap'):
m = MIXIN_COLS[name]
t0 = QTable([m], names=['m'])
for i0, i1 in ((1, 2),
(slice(0, 2), slice(1, 3)),
(np.array([1, 2]), np.array([2, 3]))):
t = t0.copy()
t['m'][i0] = m[i1]
if name == 'arraywrap':
assert np.all(t['m'].data[i0] == m.data[i1])
assert np.all(t0['m'].data[i0] == m.data[i0])
assert np.all(t0['m'].data[i0] != t['m'].data[i0])
else:
assert np.all(t['m'][i0] == m[i1])
assert np.all(t0['m'][i0] == m[i0])
assert np.all(t0['m'][i0] != t['m'][i0])
def test_conversion_qtable_table():
"""
Test that a table round trips from QTable => Table => QTable
"""
qt = QTable(MIXIN_COLS)
names = qt.colnames
for name in names:
qt[name].info.description = name
t = Table(qt)
for name in names:
assert t[name].info.description == name
if name == 'quantity':
assert np.all(t['quantity'] == qt['quantity'].value)
assert np.all(t['quantity'].unit is qt['quantity'].unit)
assert isinstance(t['quantity'], t.ColumnClass)
else:
assert_table_name_col_equal(t, name, qt[name])
qt2 = QTable(qt)
for name in names:
assert qt2[name].info.description == name
assert_table_name_col_equal(qt2, name, qt[name])
def test_setitem_as_column_name():
"""
Test for mixin-related regression described in #3321.
"""
t = Table()
t['a'] = ['x', 'y']
t['b'] = 'b' # Previously was failing with KeyError
assert np.all(t['a'] == ['x', 'y'])
assert np.all(t['b'] == ['b', 'b'])
def test_quantity_representation():
"""
Test that table representation of quantities does not have unit
"""
t = QTable([[1, 2] * u.m])
assert t.pformat() == ['col0',
' m ',
'----',
' 1.0',
' 2.0']
def test_representation_representation():
"""
Test that Representations are represented correctly.
"""
# With no unit we get "None" in the unit row
c = coordinates.CartesianRepresentation([0], [1], [0], unit=u.one)
t = Table([c])
assert t.pformat() == [' col0 ',
'------------',
'(0., 1., 0.)']
c = coordinates.CartesianRepresentation([0], [1], [0], unit='m')
t = Table([c])
assert t.pformat() == [' col0 ',
' m ',
'------------',
'(0., 1., 0.)']
c = coordinates.SphericalRepresentation([10]*u.deg, [20]*u.deg, [1]*u.pc)
t = Table([c])
assert t.pformat() == [' col0 ',
' deg, deg, pc ',
'--------------',
'(10., 20., 1.)']
c = coordinates.UnitSphericalRepresentation([10]*u.deg, [20]*u.deg)
t = Table([c])
assert t.pformat() == [' col0 ',
' deg ',
'----------',
'(10., 20.)']
c = coordinates.SphericalCosLatDifferential(
[10]*u.mas/u.yr, [2]*u.mas/u.yr, [10]*u.km/u.s)
t = Table([c])
assert t.pformat() == [' col0 ',
'mas / yr, mas / yr, km / s',
'--------------------------',
' (10., 2., 10.)']
def test_skycoord_representation():
"""
Test that skycoord representation works, both in the way that the
values are output and in changing the frame representation.
"""
# With no unit we get "None" in the unit row
c = coordinates.SkyCoord([0], [1], [0], representation_type='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
'None,None,None',
'--------------',
' 0.0,1.0,0.0']
# Test that info works with a dynamically changed representation
c = coordinates.SkyCoord([0], [1], [0], unit='m', representation_type='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
' m,m,m ',
'-----------',
'0.0,1.0,0.0']
t['col0'].representation_type = 'unitspherical'
assert t.pformat() == [' col0 ',
'deg,deg ',
'--------',
'90.0,0.0']
t['col0'].representation_type = 'cylindrical'
assert t.pformat() == [' col0 ',
' m,deg,m ',
'------------',
'1.0,90.0,0.0']
@pytest.mark.parametrize('as_ndarray_mixin', [True, False])
def test_ndarray_mixin(as_ndarray_mixin):
"""
Test directly adding various forms of structured ndarray columns to a table.
Adding as NdarrayMixin is expected to be somewhat unusual after #12644
(which provides full support for structured array Column's). This test shows
that the end behavior is the same in both cases.
"""
a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')],
                 dtype='<i4,|U1')
b = np.array([(10, 'aa'), (20, 'bb'), (30, 'cc'), (40, 'dd')],
                 dtype=[('x', 'i4'), ('y', 'U2')])
c = np.rec.fromrecords([(100., 'raa'), (200., 'rbb'), (300., 'rcc'), (400., 'rdd')],
names=['rx', 'ry'])
d = np.arange(8, dtype='i8').reshape(4, 2)
if as_ndarray_mixin:
a = a.view(NdarrayMixin)
b = b.view(NdarrayMixin)
c = c.view(NdarrayMixin)
d = d.view(NdarrayMixin)
class_exp = NdarrayMixin
else:
class_exp = Column
# Add one during initialization and the next as a new column.
t = Table([a], names=['a'])
t['b'] = b
t['c'] = c
t['d'] = d
assert isinstance(t['a'], class_exp)
assert t['a'][1][1] == a[1][1]
assert t['a'][2][0] == a[2][0]
assert t[1]['a'][1] == a[1][1]
assert t[2]['a'][0] == a[2][0]
assert isinstance(t['b'], class_exp)
assert t['b'][1]['x'] == b[1]['x']
assert t['b'][1]['y'] == b[1]['y']
assert t[1]['b']['x'] == b[1]['x']
assert t[1]['b']['y'] == b[1]['y']
assert isinstance(t['c'], class_exp)
assert t['c'][1]['rx'] == c[1]['rx']
assert t['c'][1]['ry'] == c[1]['ry']
assert t[1]['c']['rx'] == c[1]['rx']
assert t[1]['c']['ry'] == c[1]['ry']
assert isinstance(t['d'], class_exp)
assert t['d'][1][0] == d[1][0]
assert t['d'][1][1] == d[1][1]
assert t[1]['d'][0] == d[1][0]
assert t[1]['d'][1] == d[1][1]
assert t.pformat(show_dtype=True) == [
' a [f0, f1] b [x, y] c [rx, ry] d ',
'(int32, str1) (int32, str2) (float64, str3) int64[2]',
'------------- ------------- --------------- --------',
" (1, 'a') (10, 'aa') (100., 'raa') 0 .. 1",
" (2, 'b') (20, 'bb') (200., 'rbb') 2 .. 3",
" (3, 'c') (30, 'cc') (300., 'rcc') 4 .. 5",
" (4, 'd') (40, 'dd') (400., 'rdd') 6 .. 7"]
def test_possible_string_format_functions():
"""
The QuantityInfo info class for Quantity implements a
possible_string_format_functions() method that overrides the
standard pprint._possible_string_format_functions() function.
Test this.
"""
t = QTable([[1, 2] * u.m])
t['col0'].info.format = '%.3f'
assert t.pformat() == [' col0',
' m ',
'-----',
'1.000',
'2.000']
t['col0'].info.format = 'hi {:.3f}'
assert t.pformat() == [' col0 ',
' m ',
'--------',
'hi 1.000',
'hi 2.000']
t['col0'].info.format = '.4f'
assert t.pformat() == [' col0 ',
' m ',
'------',
'1.0000',
'2.0000']
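    # Taken together: the format attribute accepts old-style ('%.3f'),
    # new-style ('hi {:.3f}'), and bare format-spec ('.4f') strings alike.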
def test_rename_mixin_columns(mixin_cols):
"""
Rename a mixin column.
"""
t = QTable(mixin_cols)
tc = t.copy()
t.rename_column('m', 'mm')
assert t.colnames == ['i', 'a', 'b', 'mm']
if isinstance(t['mm'], table_helpers.ArrayWrapper):
assert np.all(t['mm'].data == tc['m'].data)
elif isinstance(t['mm'], coordinates.SkyCoord):
assert np.all(t['mm'].ra == tc['m'].ra)
assert np.all(t['mm'].dec == tc['m'].dec)
elif isinstance(t['mm'], coordinates.BaseRepresentationOrDifferential):
assert np.all(representation_equal(t['mm'], tc['m']))
else:
assert np.all(t['mm'] == tc['m'])
def test_represent_mixins_as_columns_unit_fix():
"""
If the unit is invalid for a column that gets serialized this would
cause an exception. Fixed in #7481.
"""
t = Table({'a': [1, 2]}, masked=True)
t['a'].unit = 'not a valid unit'
t['a'].mask[1] = True
serialize.represent_mixins_as_columns(t)
def test_primary_data_column_gets_description():
"""
If the mixin defines a primary data column, that should get the
description, format, etc., so no __info__ should be needed.
"""
t = QTable({'a': [1, 2] * u.m})
t['a'].info.description = 'parrot'
t['a'].info.format = '7.2f'
tser = serialize.represent_mixins_as_columns(t)
assert '__info__' not in tser.meta['__serialized_columns__']['a']
assert tser['a'].format == '7.2f'
assert tser['a'].description == 'parrot'
def test_skycoord_with_velocity():
# Regression test for gh-6447
sc = SkyCoord([1], [2], unit='deg', galcen_v_sun=None)
t = Table([sc])
s = StringIO()
t.write(s, format='ascii.ecsv', overwrite=True)
s.seek(0)
t2 = Table.read(s.read(), format='ascii.ecsv')
assert skycoord_equal(t2['col0'], sc)
@pytest.mark.parametrize('table_cls', [Table, QTable])
def test_ensure_input_info_is_unchanged(table_cls):
"""If a mixin input to a table has no info, it should stay that way.
This since having 'info' slows down slicing, etc.
See gh-11066.
"""
q = [1, 2] * u.m
assert 'info' not in q.__dict__
t = table_cls([q], names=['q'])
assert 'info' not in q.__dict__
t = table_cls([q])
assert 'info' not in q.__dict__
t = table_cls({'q': q})
assert 'info' not in q.__dict__
t['q2'] = q
assert 'info' not in q.__dict__
sc = SkyCoord([1, 2], [2, 3], unit='deg')
t['sc'] = sc
assert 'info' not in sc.__dict__
def test_bad_info_class():
"""Make a mixin column class that does not trigger the machinery to generate
a pure column representation"""
class MyArrayWrapper(ArrayWrapper):
info = ParentDtypeInfo()
t = Table()
t['tm'] = MyArrayWrapper([0, 1, 2])
out = StringIO()
match = r"failed to represent column 'tm' \(MyArrayWrapper\) as one or more Column subclasses"
with pytest.raises(TypeError, match=match):
represent_mixins_as_columns(t)
|
eafc44aee242fbe16b5d6d2664c0ee4502c62690197bb9266449a9caec418aea | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from io import StringIO
from astropy import table
from astropy.io import ascii
from astropy.table import Table, QTable
from astropy.table.table_helpers import simple_table
from astropy import units as u
from astropy.utils import console
BIG_WIDE_ARR = np.arange(2000, dtype=np.float64).reshape(100, 20)
SMALL_ARR = np.arange(18, dtype=np.int64).reshape(6, 3)
@pytest.mark.usefixtures('table_type')
class TestMultiD():
def test_multidim(self, table_type):
"""Test printing with multidimensional column"""
arr = [np.array([[1, 2],
[10, 20]], dtype=np.int64),
np.array([[3, 4],
[30, 40]], dtype=np.int64),
np.array([[5, 6],
[50, 60]], dtype=np.int64)]
t = table_type(arr)
lines = t.pformat(show_dtype=True)
assert lines == [' col0 col1 col2 ',
'int64[2] int64[2] int64[2]',
'-------- -------- --------',
' 1 .. 2 3 .. 4 5 .. 6',
'10 .. 20 30 .. 40 50 .. 60']
lines = t.pformat(html=True, show_dtype=True)
assert lines == [
f'<table id="table{id(t)}">',
'<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>',
'<thead><tr><th>int64[2]</th><th>int64[2]</th><th>int64[2]</th></tr></thead>',
'<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>',
'<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>',
'</table>']
nbclass = table.conf.default_notebook_table_class
masked = 'masked=True ' if t.masked else ''
assert t._repr_html_().splitlines() == [
f'<div><i>{table_type.__name__} {masked}length=2</i>',
f'<table id="table{id(t)}" class="{nbclass}">',
'<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>',
'<thead><tr><th>int64[2]</th><th>int64[2]</th><th>int64[2]</th></tr></thead>',
'<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>',
'<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>',
'</table></div>']
t = table_type([arr])
lines = t.pformat(show_dtype=True)
assert lines == [' col0 ',
'int64[2,2]',
'----------',
' 1 .. 20',
' 3 .. 40',
' 5 .. 60']
def test_fake_multidim(self, table_type):
"""Test printing with 'fake' multidimensional column"""
arr = [np.array([[(1,)],
[(10,)]], dtype=np.int64),
np.array([[(3,)],
[(30,)]], dtype=np.int64),
np.array([[(5,)],
[(50,)]], dtype=np.int64)]
t = table_type(arr)
lines = t.pformat(show_dtype=True)
assert lines == [
" col0 col1 col2 ",
"int64[1,1] int64[1,1] int64[1,1]",
"---------- ---------- ----------",
" 1 3 5",
" 10 30 50"]
lines = t.pformat(html=True, show_dtype=True)
assert lines == [
f'<table id="table{id(t)}">',
'<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>',
'<thead><tr><th>int64[1,1]</th><th>int64[1,1]</th><th>int64[1,1]</th></tr></thead>',
'<tr><td>1</td><td>3</td><td>5</td></tr>',
'<tr><td>10</td><td>30</td><td>50</td></tr>',
'</table>']
nbclass = table.conf.default_notebook_table_class
masked = 'masked=True ' if t.masked else ''
assert t._repr_html_().splitlines() == [
f'<div><i>{table_type.__name__} {masked}length=2</i>',
f'<table id="table{id(t)}" class="{nbclass}">',
'<thead><tr><th>col0</th><th>col1</th><th>col2</th></tr></thead>',
'<thead><tr><th>int64[1,1]</th><th>int64[1,1]</th><th>int64[1,1]</th></tr></thead>',
'<tr><td>1</td><td>3</td><td>5</td></tr>',
'<tr><td>10</td><td>30</td><td>50</td></tr>',
'</table></div>']
t = table_type([arr])
lines = t.pformat(show_dtype=True)
assert lines == [' col0 ',
'int64[2,1,1]',
'------------',
' 1 .. 10',
' 3 .. 30',
' 5 .. 50']
def test_html_escaping():
t = table.Table([('<script>alert("gotcha");</script>', 2, 3)])
nbclass = table.conf.default_notebook_table_class
assert t._repr_html_().splitlines() == [
'<div><i>Table length=3</i>',
f'<table id="table{id(t)}" class="{nbclass}">',
'<thead><tr><th>col0</th></tr></thead>',
'<thead><tr><th>str33</th></tr></thead>',
'<tr><td><script>alert("gotcha");</script></td></tr>',
'<tr><td>2</td></tr>',
'<tr><td>3</td></tr>',
'</table></div>']
@pytest.mark.usefixtures('table_type')
class TestPprint():
def _setup(self, table_type):
self.tb = table_type(BIG_WIDE_ARR)
self.tb['col0'].format = 'e'
self.tb['col1'].format = '.6f'
self.tb['col0'].unit = 'km**2'
self.tb['col19'].unit = 'kg s m**-2'
self.ts = table_type(SMALL_ARR)
def test_empty_table(self, table_type):
t = table_type()
lines = t.pformat()
assert lines == ['<No columns>']
c = repr(t)
masked = 'masked=True ' if t.masked else ''
assert c.splitlines() == [f'<{table_type.__name__} {masked}length=0>',
'<No columns>']
def test_format0(self, table_type):
"""Try getting screen size but fail to defaults because testing doesn't
have access to screen (fcntl.ioctl fails).
"""
self._setup(table_type)
arr = np.arange(4000, dtype=np.float64).reshape(100, 40)
lines = table_type(arr).pformat()
nlines, width = console.terminal_size()
assert len(lines) == nlines
for line in lines[:-1]: # skip last "Length = .. rows" line
assert width - 10 < len(line) <= width
def test_format1(self, table_type):
"""Basic test of formatting, unit header row included"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40)
assert lines == [' col0 col1 ... col19 ',
' km2 ... kg s / m2',
'------------ ----------- ... ---------',
'0.000000e+00 1.000000 ... 19.0',
' ... ... ... ...',
'1.960000e+03 1961.000000 ... 1979.0',
'1.980000e+03 1981.000000 ... 1999.0',
'Length = 100 rows']
def test_format2(self, table_type):
"""Basic test of formatting, unit header row excluded"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=False)
assert lines == [' col0 col1 ... col19 ',
'------------ ----------- ... ------',
'0.000000e+00 1.000000 ... 19.0',
'2.000000e+01 21.000000 ... 39.0',
' ... ... ... ...',
'1.960000e+03 1961.000000 ... 1979.0',
'1.980000e+03 1981.000000 ... 1999.0',
'Length = 100 rows']
def test_format3(self, table_type):
"""Include the unit header row"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=True)
assert lines == [' col0 col1 ... col19 ',
' km2 ... kg s / m2',
'------------ ----------- ... ---------',
'0.000000e+00 1.000000 ... 19.0',
' ... ... ... ...',
'1.960000e+03 1961.000000 ... 1979.0',
'1.980000e+03 1981.000000 ... 1999.0',
'Length = 100 rows']
def test_format4(self, table_type):
"""Do not include the name header row"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_name=False)
assert lines == [' km2 ... kg s / m2',
'------------ ----------- ... ---------',
'0.000000e+00 1.000000 ... 19.0',
'2.000000e+01 21.000000 ... 39.0',
' ... ... ... ...',
'1.960000e+03 1961.000000 ... 1979.0',
'1.980000e+03 1981.000000 ... 1999.0',
'Length = 100 rows']
def test_noclip(self, table_type):
"""Basic table print"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=-1, max_width=-1)
assert lines == ['col0 col1 col2',
'---- ---- ----',
' 0 1 2',
' 3 4 5',
' 6 7 8',
' 9 10 11',
' 12 13 14',
' 15 16 17']
def test_clip1(self, table_type):
"""max lines below hard limit of 8
"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=3, max_width=-1)
assert lines == ['col0 col1 col2',
'---- ---- ----',
' 0 1 2',
' 3 4 5',
' 6 7 8',
' 9 10 11',
' 12 13 14',
' 15 16 17']
def test_clip2(self, table_type):
"""max lines below hard limit of 8 and output longer than 8
"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=3, max_width=-1, show_unit=True, show_dtype=True)
assert lines == [' col0 col1 col2',
' ',
'int64 int64 int64',
'----- ----- -----',
' 0 1 2',
' ... ... ...',
' 15 16 17',
'Length = 6 rows']
def test_clip3(self, table_type):
"""Max lines below hard limit of 8 and max width below hard limit
of 10
"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=3, max_width=1, show_unit=True)
assert lines == ['col0 ...',
' ...',
'---- ...',
' 0 ...',
' ... ...',
' 12 ...',
' 15 ...',
'Length = 6 rows']
def test_clip4(self, table_type):
"""Test a range of max_lines"""
self._setup(table_type)
for max_lines in (0, 1, 4, 5, 6, 7, 8, 100, 101, 102, 103, 104, 130):
lines = self.tb.pformat(max_lines=max_lines, show_unit=False)
assert len(lines) == max(8, min(102, max_lines))
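            # 102 = 100 data rows + 2 header lines (name and dashes, since
            # show_unit=False); 8 is the hard minimum pformat will emit.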
def test_pformat_all(self, table_type):
"""Test that all rows are printed by default"""
self._setup(table_type)
lines = self.tb.pformat_all()
# +3 accounts for the three header lines in this table
assert len(lines) == BIG_WIDE_ARR.shape[0] + 3
def test_pprint_all(self, table_type, capsys):
"""Test that all rows are printed by default"""
self._setup(table_type)
self.tb.pprint_all()
(out, err) = capsys.readouterr()
# +3 accounts for the three header lines in this table
        assert len(out.splitlines()) == BIG_WIDE_ARR.shape[0] + 3
@pytest.mark.usefixtures('table_type')
class TestFormat():
def test_column_format(self, table_type):
t = table_type([[1, 2], [3, 4]], names=('a', 'b'))
# default (format=None)
assert str(t['a']) == ' a \n---\n 1\n 2'
# just a plain format string
t['a'].format = '5.2f'
assert str(t['a']) == ' a \n-----\n 1.00\n 2.00'
# Old-style that is almost new-style
t['a'].format = '{ %4.2f }'
assert str(t['a']) == ' a \n--------\n{ 1.00 }\n{ 2.00 }'
# New-style that is almost old-style
t['a'].format = '%{0:}'
assert str(t['a']) == ' a \n---\n %1\n %2'
# New-style with extra spaces
t['a'].format = ' {0:05d} '
assert str(t['a']) == ' a \n-------\n 00001 \n 00002 '
# New-style has precedence
t['a'].format = '%4.2f {0:}'
assert str(t['a']) == ' a \n-------\n%4.2f 1\n%4.2f 2'
# Invalid format spec
with pytest.raises(ValueError):
t['a'].format = 'fail'
assert t['a'].format == '%4.2f {0:}' # format did not change
def test_column_format_with_threshold(self, table_type):
from astropy import conf
with conf.set_temp('max_lines', 8):
t = table_type([np.arange(20)], names=['a'])
t['a'].format = '%{0:}'
assert str(t['a']).splitlines() == [' a ',
'---',
' %0',
' %1',
'...',
'%18',
'%19',
'Length = 20 rows']
t['a'].format = '{ %4.2f }'
assert str(t['a']).splitlines() == [' a ',
'---------',
' { 0.00 }',
' { 1.00 }',
' ...',
'{ 18.00 }',
'{ 19.00 }',
'Length = 20 rows']
def test_column_format_func(self, table_type):
# run most of functions twice
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))
# mathematical function
t['a'].format = lambda x: str(x * 3.)
assert str(t['a']) == ' a \n---\n3.0\n6.0'
assert str(t['a']) == ' a \n---\n3.0\n6.0'
def test_column_format_callable(self, table_type):
# run most of functions twice
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))
# mathematical function
class format:
def __call__(self, x):
return str(x * 3.)
t['a'].format = format()
assert str(t['a']) == ' a \n---\n3.0\n6.0'
assert str(t['a']) == ' a \n---\n3.0\n6.0'
def test_column_format_func_wrong_number_args(self, table_type):
t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))
# function that expects wrong number of arguments
def func(a, b):
pass
with pytest.raises(ValueError):
t['a'].format = func
def test_column_format_func_multiD(self, table_type):
arr = [np.array([[1, 2],
[10, 20]], dtype='i8')]
t = table_type(arr, names=['a'])
# mathematical function
t['a'].format = lambda x: str(x * 3.)
outstr = (' a \n'
'------------\n'
' 3.0 .. 6.0\n'
'30.0 .. 60.0')
assert str(t['a']) == outstr
def test_column_format_func_not_str(self, table_type):
t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))
# mathematical function
with pytest.raises(ValueError):
t['a'].format = lambda x: x * 3
def test_column_alignment(self, table_type):
t = table_type([[1], [2], [3], [4]],
names=('long title a', 'long title b',
'long title c', 'long title d'))
t['long title a'].format = '<'
t['long title b'].format = '^'
t['long title c'].format = '>'
t['long title d'].format = '0='
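        # '<', '^' and '>' are the standard format-spec alignments; '0=' is
        # zero-fill with '=' alignment (padding inserted after any sign),
        # per the Python format mini-language.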
assert str(t['long title a']) == 'long title a\n------------\n1 '
assert str(t['long title b']) == 'long title b\n------------\n 2 '
assert str(t['long title c']) == 'long title c\n------------\n 3'
assert str(t['long title d']) == 'long title d\n------------\n000000000004'
class TestFormatWithMaskedElements():
def test_column_format(self):
t = Table([[1, 2, 3], [3, 4, 5]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False, True]
# default (format=None)
assert str(t['a']) == ' a \n---\n --\n 2\n --'
# just a plain format string
t['a'].format = '5.2f'
assert str(t['a']) == ' a \n-----\n --\n 2.00\n --'
# Old-style that is almost new-style
t['a'].format = '{ %4.2f }'
assert str(t['a']) == ' a \n--------\n --\n{ 2.00 }\n --'
# New-style that is almost old-style
t['a'].format = '%{0:}'
assert str(t['a']) == ' a \n---\n --\n %2\n --'
# New-style with extra spaces
t['a'].format = ' {0:05d} '
assert str(t['a']) == ' a \n-------\n --\n 00002 \n --'
# New-style has precedence
t['a'].format = '%4.2f {0:}'
assert str(t['a']) == ' a \n-------\n --\n%4.2f 2\n --'
def test_column_format_with_threshold_masked_table(self):
from astropy import conf
with conf.set_temp('max_lines', 8):
t = Table([np.arange(20)], names=['a'], masked=True)
t['a'].format = '%{0:}'
t['a'].mask[0] = True
t['a'].mask[-1] = True
assert str(t['a']).splitlines() == [' a ',
'---',
' --',
' %1',
'...',
'%18',
' --',
'Length = 20 rows']
t['a'].format = '{ %4.2f }'
assert str(t['a']).splitlines() == [' a ',
'---------',
' --',
' { 1.00 }',
' ...',
'{ 18.00 }',
' --',
'Length = 20 rows']
def test_column_format_func(self):
# run most of functions twice
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False, True]
# mathematical function
t['a'].format = lambda x: str(x * 3.)
assert str(t['a']) == ' a \n---\n --\n6.0\n --'
assert str(t['a']) == ' a \n---\n --\n6.0\n --'
def test_column_format_func_with_special_masked(self):
# run most of functions twice
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False, True]
# mathematical function
def format_func(x):
if x is np.ma.masked:
return '!!'
else:
return str(x * 3.)
t['a'].format = format_func
assert str(t['a']) == ' a \n---\n !!\n6.0\n !!'
assert str(t['a']) == ' a \n---\n !!\n6.0\n !!'
def test_column_format_callable(self):
# run most of functions twice
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False, True]
# mathematical function
class format:
def __call__(self, x):
return str(x * 3.)
t['a'].format = format()
assert str(t['a']) == ' a \n---\n --\n6.0\n --'
assert str(t['a']) == ' a \n---\n --\n6.0\n --'
def test_column_format_func_wrong_number_args(self):
t = Table([[1., 2.], [3, 4]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False]
# function that expects wrong number of arguments
def func(a, b):
pass
with pytest.raises(ValueError):
t['a'].format = func
# but if all are masked, it never gets called
t['a'].mask = [True, True]
assert str(t['a']) == ' a \n---\n --\n --'
def test_column_format_func_multiD(self):
arr = [np.array([[1, 2],
[10, 20]], dtype='i8')]
t = Table(arr, names=['a'], masked=True)
t['a'].mask[0, 1] = True
t['a'].mask[1, 1] = True
# mathematical function
t['a'].format = lambda x: str(x * 3.)
outstr = (' a \n'
'----------\n'
' 3.0 .. --\n'
'30.0 .. --')
assert str(t['a']) == outstr
assert str(t['a']) == outstr
def test_pprint_npfloat32():
"""
Test for #148, that np.float32 cannot by itself be formatted as float,
but has to be converted to a python float.
"""
dat = np.array([1., 2.], dtype=np.float32)
t = Table([dat], names=['a'])
t['a'].format = '5.2f'
assert str(t['a']) == ' a \n-----\n 1.00\n 2.00'
def test_pprint_py3_bytes():
"""
Test for #1346 and #4944. Make sure a bytestring (dtype=S<N>) in Python 3
is printed correctly (without the "b" prefix like b'string').
"""
val = bytes('val', encoding='utf-8')
blah = 'bläh'.encode()
dat = np.array([val, blah], dtype=[('col', 'S10')])
t = table.Table(dat)
assert t['col'].pformat() == ['col ', '----', ' val', 'bläh']
def test_pprint_structured():
su = table.Column([(1, (1.5, [1.6, 1.7])),
(2, (2.5, [2.6, 2.7]))],
name='su',
dtype=[('i', np.int64),
('f', [('p0', np.float64), ('p1', np.float64, (2,))])])
assert su.pformat() == [
" su [i, f[p0, p1]] ",
"----------------------",
"(1, (1.5, [1.6, 1.7]))",
"(2, (2.5, [2.6, 2.7]))"]
t = table.Table([su])
assert t.pformat() == su.pformat()
assert repr(t).splitlines() == [
"<Table length=2>",
" su [i, f[p0, p1]] ",
"(int64, (float64, float64[2]))",
"------------------------------",
" (1, (1.5, [1.6, 1.7]))",
" (2, (2.5, [2.6, 2.7]))"]
def test_pprint_structured_with_format():
dtype = np.dtype([('par', 'f8'), ('min', 'f8'), ('id', 'i4'), ('name', 'U4')])
c = table.Column([(1.2345678, -20, 3, 'bar'),
(12.345678, 4.5678, 33, 'foo')], dtype=dtype)
t = table.Table()
t['a'] = [1, 2]
t['c'] = c
t['c'].info.format = '{par:6.2f} {min:5.1f} {id:03d} {name:4s}'
exp = [
' a c [par, min, id, name]',
'--- ----------------------',
' 1 1.23 -20.0 003 bar ',
' 2 12.35 4.6 033 foo ']
assert t.pformat_all() == exp
def test_pprint_nameless_col():
"""Regression test for #2213, making sure a nameless column can be printed
using None as the name.
"""
col = table.Column([1., 2.])
assert str(col).startswith('None')
def test_html():
"""Test HTML printing"""
dat = np.array([1., 2.], dtype=np.float32)
t = Table([dat], names=['a'])
lines = t.pformat(html=True)
assert lines == [f'<table id="table{id(t)}">',
'<thead><tr><th>a</th></tr></thead>',
'<tr><td>1.0</td></tr>',
'<tr><td>2.0</td></tr>',
'</table>']
lines = t.pformat(html=True, tableclass='table-striped')
assert lines == [
f'<table id="table{id(t)}" class="table-striped">',
'<thead><tr><th>a</th></tr></thead>',
'<tr><td>1.0</td></tr>',
'<tr><td>2.0</td></tr>',
'</table>']
lines = t.pformat(html=True, tableclass=['table', 'table-striped'])
assert lines == [
f'<table id="table{id(t)}" class="table table-striped">',
'<thead><tr><th>a</th></tr></thead>',
'<tr><td>1.0</td></tr>',
'<tr><td>2.0</td></tr>',
'</table>']
def test_align():
t = simple_table(2, kinds='iS')
assert t.pformat() == [' a b ',
'--- ---',
' 1 b',
' 2 c']
# Use column format attribute
t['a'].format = '<'
assert t.pformat() == [' a b ',
'--- ---',
'1 b',
'2 c']
# Now override column format attribute with various combinations of align
tpf = [' a b ',
'--- ---',
' 1 b ',
' 2 c ']
for align in ('^', ['^', '^'], ('^', '^')):
assert tpf == t.pformat(align=align)
assert t.pformat(align='<') == [' a b ',
'--- ---',
'1 b ',
'2 c ']
assert t.pformat(align='0=') == [' a b ',
'--- ---',
'001 00b',
'002 00c']
assert t.pformat(align=['<', '^']) == [' a b ',
'--- ---',
'1 b ',
'2 c ']
# Now use fill characters. Stress the system using a fill
# character that is the same as an align character.
t = simple_table(2, kinds='iS')
assert t.pformat(align='^^') == [' a b ',
'--- ---',
'^1^ ^b^',
'^2^ ^c^']
assert t.pformat(align='^>') == [' a b ',
'--- ---',
'^^1 ^^b',
'^^2 ^^c']
assert t.pformat(align='^<') == [' a b ',
'--- ---',
'1^^ b^^',
'2^^ c^^']
# Complicated interaction (same as narrative docs example)
t1 = Table([[1.0, 2.0], [1, 2]], names=['column1', 'column2'])
t1['column1'].format = '#^.2f'
assert t1.pformat() == ['column1 column2',
'------- -------',
'##1.00# 1',
'##2.00# 2']
assert t1.pformat(align='!<') == ['column1 column2',
'------- -------',
'1.00!!! 1!!!!!!',
'2.00!!! 2!!!!!!']
assert t1.pformat(align=[None, '!<']) == ['column1 column2',
'------- -------',
'##1.00# 1!!!!!!',
'##2.00# 2!!!!!!']
# Zero fill
t['a'].format = '+d'
assert t.pformat(align='0=') == [' a b ',
'--- ---',
'+01 00b',
'+02 00c']
with pytest.raises(ValueError):
t.pformat(align=['fail'])
with pytest.raises(TypeError):
t.pformat(align=0)
with pytest.raises(TypeError):
t.pprint(align=0)
# Make sure pprint() does not raise an exception
t.pprint()
with pytest.raises(ValueError):
t.pprint(align=['<', '<', '<'])
with pytest.raises(ValueError):
t.pprint(align='x=')
def test_auto_format_func():
"""Test for #5802 (fix for #5800 where format_func key is not unique)"""
t = Table([[1, 2] * u.m])
t['col0'].format = '%f'
t.pformat() # Force caching of format function
qt = QTable(t)
qt.pformat() # Generates exception prior to #5802
def test_decode_replace():
"""
Test printing a bytestring column with a value that fails
decoding to utf-8 and gets replaced by U+FFFD. See
https://docs.python.org/3/library/codecs.html#codecs.replace_errors
"""
t = Table([[b'Z\xf0']])
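    # b'\xf0' is the lead byte of a four-byte UTF-8 sequence that never
    # completes, so decoding with errors='replace' yields U+FFFD.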
assert t.pformat() == ['col0', '----', ' Z\ufffd']
class TestColumnsShowHide:
"""Tests of show and hide table columns"""
def setup_method(self):
self.t = simple_table(size=1, cols=4, kinds='i')
@pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))
def test_basic(self, attr):
t = self.t
assert repr(getattr(Table, attr)) == f'<PprintIncludeExclude name={attr} default=None>'
t_show_hide = getattr(t, attr)
assert repr(t_show_hide) == f'<PprintIncludeExclude name={attr} value=None>'
# Default value is None
assert t_show_hide() is None
def test_slice(self):
t = self.t
t.pprint_include_names = 'a'
t.pprint_exclude_names = 'b'
t2 = t[0:1]
assert t2.pprint_include_names() == ('a',)
assert t2.pprint_exclude_names() == ('b',)
def test_copy(self):
t = self.t
t.pprint_include_names = 'a'
t.pprint_exclude_names = 'b'
t2 = t.copy()
assert t2.pprint_include_names() == ('a',)
assert t2.pprint_exclude_names() == ('b',)
t2.pprint_include_names = 'c'
t2.pprint_exclude_names = 'd'
assert t.pprint_include_names() == ('a',)
assert t.pprint_exclude_names() == ('b',)
assert t2.pprint_include_names() == ('c',)
assert t2.pprint_exclude_names() == ('d',)
@pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))
@pytest.mark.parametrize('value', ('z', ['a', 'z']))
def test_setting(self, attr, value):
t = self.t
t_show_hide = getattr(t, attr)
# Expected attribute value ('z',) or ('a', 'z')
exp = (value,) if isinstance(value, str) else tuple(value)
# Context manager, can include column names that do not exist
with t_show_hide.set(value):
assert t_show_hide() == exp
assert t.meta['__attributes__'] == {attr: exp}
assert t_show_hide() is None
# Setting back to None clears out meta
assert t.meta == {}
# Do `t.pprint_include_names/hide = value`
setattr(t, attr, value)
assert t_show_hide() == exp
# Clear attribute
t_show_hide.set(None)
assert t_show_hide() is None
# Now use set() method
t_show_hide.set(value)
assert t_show_hide() == exp
with t_show_hide.set(None):
assert t_show_hide() is None
assert t.meta == {}
assert t_show_hide() == exp
@pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))
@pytest.mark.parametrize('value', ('z', ['a', 'z'], ('a', 'z')))
def test_add_remove(self, attr, value):
t = self.t
t_show_hide = getattr(t, attr)
        # Expected attribute value ('z',) or ('a', 'z')
exp = (value,) if isinstance(value, str) else tuple(value)
# add() method for str or list of str
t_show_hide.add(value)
assert t_show_hide() == exp
# Adding twice has no effect
t_show_hide.add(value)
assert t_show_hide() == exp
# Remove values (str or list of str). Reverts to None if all names are
# removed.
t_show_hide.remove(value)
assert t_show_hide() is None
# Remove just one name, possibly leaving a name.
t_show_hide.add(value)
t_show_hide.remove('z')
assert t_show_hide() == (None if value == 'z' else ('a',))
# Cannot remove name not in the list
t_show_hide.set(['a', 'z'])
with pytest.raises(ValueError, match=f'x not in {attr}'):
t_show_hide.remove(('x', 'z'))
@pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))
def test_rename(self, attr):
t = self.t
t_hide_show = getattr(t, attr)
t_hide_show.set(['a', 'b'])
t.rename_column('a', 'aa')
assert t_hide_show() == ('aa', 'b')
@pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))
def test_remove(self, attr):
t = self.t
t_hide_show = getattr(t, attr)
t_hide_show.set(['a', 'b'])
del t['a']
assert t_hide_show() == ('b',)
def test_serialization(self):
# Serialization works for ECSV. Currently fails for FITS, works with
# HDF5.
t = self.t
t.pprint_exclude_names = ['a', 'y']
t.pprint_include_names = ['b', 'z']
out = StringIO()
ascii.write(t, out, format='ecsv')
t2 = ascii.read(out.getvalue(), format='ecsv')
assert t2.pprint_exclude_names() == ('a', 'y')
assert t2.pprint_include_names() == ('b', 'z')
def test_output(self):
"""Test that pprint_include/exclude_names actually changes the print output"""
t = self.t
exp = [' b d ',
'--- ---',
' 2 4']
with t.pprint_exclude_names.set(['a', 'c']):
out = t.pformat_all()
assert out == exp
with t.pprint_include_names.set(['b', 'd']):
out = t.pformat_all()
assert out == exp
with t.pprint_exclude_names.set(['a', 'c']):
out = t.pformat_all()
assert out == exp
with t.pprint_include_names.set(['b', 'd']):
out = t.pformat_all()
assert out == exp
# Mixture (not common in practice but possible). Note, the trailing
# backslash instead of parens is needed for Python < 3.9. See:
# https://bugs.python.org/issue12782.
with t.pprint_include_names.set(['b', 'c', 'd']), \
t.pprint_exclude_names.set(['c']):
out = t.pformat_all()
assert out == exp
def test_output_globs(self):
"""Test that pprint_include/exclude_names works with globs (fnmatch)"""
t = self.t
t['a2'] = 1
t['a23'] = 2
# Show only the a* columns
exp = [' a a2 a23',
'--- --- ---',
' 1 1 2']
with t.pprint_include_names.set('a*'):
out = t.pformat_all()
assert out == exp
# Show a* but exclude a??
exp = [' a a2',
'--- ---',
' 1 1']
with t.pprint_include_names.set('a*'), t.pprint_exclude_names.set('a??'):
out = t.pformat_all()
assert out == exp
# Exclude a??
exp = [' a b c d a2',
'--- --- --- --- ---',
' 1 2 3 4 1']
with t.pprint_exclude_names.set('a??'):
out = t.pformat_all()
assert out == exp
def test_embedded_newline_tab():
"""Newlines and tabs are escaped in table repr"""
t = Table(rows=[['a', 'b \n c \t \n d'], ['x', 'y\n']])
exp = [
r'col0 col1 ',
r'---- --------------',
r' a b \n c \t \n d',
r' x y\n']
assert t.pformat_all() == exp
|
8fbec438884abdae59875fec7f5c73a67b2e83e264ef2f9b3360eb519795fbe5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.table.bst import BST
def get_tree(TreeType):
b = TreeType([], [])
for val in [5, 2, 9, 3, 4, 1, 6, 10, 8, 7]:
b.add(val)
return b
@pytest.fixture
def tree():
return get_tree(BST)
r'''
5
/ \
2 9
/ \ / \
1 3 6 10
\ \
4 8
/
7
'''
@pytest.fixture
def bst(tree):
return tree
def test_bst_add(bst):
root = bst.root
assert root.data == [5]
assert root.left.data == [2]
assert root.right.data == [9]
assert root.left.left.data == [1]
assert root.left.right.data == [3]
assert root.right.left.data == [6]
assert root.right.right.data == [10]
assert root.left.right.right.data == [4]
assert root.right.left.right.data == [8]
assert root.right.left.right.left.data == [7]
def test_bst_dimensions(bst):
assert bst.size == 10
assert bst.height == 4
def test_bst_find(tree):
bst = tree
for i in range(1, 11):
node = bst.find(i)
assert node == [i]
assert bst.find(0) == []
assert bst.find(11) == []
assert bst.find('1') == []
def test_bst_traverse(bst):
preord = [5, 2, 1, 3, 4, 9, 6, 8, 7, 10]
inord = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
postord = [1, 4, 3, 2, 7, 8, 6, 10, 9, 5]
traversals = {}
for order in ('preorder', 'inorder', 'postorder'):
traversals[order] = [x.key for x in bst.traverse(order)]
assert traversals['preorder'] == preord
assert traversals['inorder'] == inord
assert traversals['postorder'] == postord
def test_bst_remove(bst):
order = (6, 9, 1, 3, 7, 2, 10, 5, 4, 8)
vals = set(range(1, 11))
for i, val in enumerate(order):
assert bst.remove(val) is True
assert bst.is_valid()
assert {x.key for x in bst.traverse('inorder')} == \
vals.difference(order[:i + 1])
assert bst.size == 10 - i - 1
assert bst.remove(-val) is False
def test_bst_duplicate(bst):
bst.add(10, 11)
assert bst.find(10) == [10, 11]
assert bst.remove(10, data=10) is True
assert bst.find(10) == [11]
with pytest.raises(ValueError):
bst.remove(10, data=30) # invalid data
assert bst.remove(10) is True
assert bst.remove(10) is False
def test_bst_range(tree):
bst = tree
lst = bst.range_nodes(4, 8)
assert sorted(x.key for x in lst) == [4, 5, 6, 7, 8]
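    # range_nodes bounds are inclusive and need not themselves be keys in
    # the tree, as the next two queries show.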
lst = bst.range_nodes(10, 11)
assert [x.key for x in lst] == [10]
lst = bst.range_nodes(11, 20)
assert len(lst) == 0
|
c9669ce4699102b98897ed1e4a2276544fa6addced52988b6f7a59cdc309279b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from astropy import units as u
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import (Box2DKernel, Gaussian2DKernel,
Moffat2DKernel, Tophat2DKernel)
from astropy.utils.exceptions import AstropyDeprecationWarning
SHAPES_ODD = [[15, 15], [31, 31]]
SHAPES_EVEN = [[8, 8], [16, 16], [32, 32]] # FIXME: not used ?!
NOSHAPE = [[None, None]]
WIDTHS = [2, 3, 4, 5]
KERNELS = []
for shape in SHAPES_ODD + NOSHAPE:
for width in WIDTHS:
KERNELS.append(Gaussian2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Box2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Tophat2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Moffat2DKernel(width, 2,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
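# Each kernel type above is instantiated for every (shape, width) pair; the
# [None, None] "shape" lets each kernel choose its own default size.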
class Test2DConvolutions:
@pytest.mark.parametrize('kernel', KERNELS)
def test_centered_makekernel(self, kernel):
"""
Test smoothing of an image with a single positive pixel
"""
shape = kernel.array.shape
x = np.zeros(shape)
xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
x[xslice] = 1.0
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
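        # convolve (direct) and convolve_fft compute the same linear
        # operation, so they should agree to ~1e-12 up to FFT roundoff.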
@pytest.mark.parametrize('kernel', KERNELS)
def test_random_makekernel(self, kernel):
"""
Test smoothing of an image made of random noise
"""
shape = kernel.array.shape
x = np.random.randn(*shape)
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
# not clear why, but these differ by a couple ulps...
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, WIDTHS)))
def test_uniform_smallkernel(self, shape, width):
"""
Test smoothing of an image with a single positive pixel
Uses a simple, small kernel
"""
if width % 2 == 0:
            # convolve requires odd-shaped kernels, so skip even widths
return
kernel = np.ones([width, width])
x = np.zeros(shape)
xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
x[xslice] = 1.0
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, [1, 3, 5])))
def test_smallkernel_Box2DKernel(self, shape, width):
"""
Test smoothing of an image with a single positive pixel
Compares a small uniform kernel to the Box2DKernel
"""
kernel1 = np.ones([width, width]) / float(width) ** 2
kernel2 = Box2DKernel(width, mode='oversample', factor=10)
x = np.zeros(shape)
xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
x[xslice] = 1.0
c2 = convolve_fft(x, kernel2, boundary='fill')
c1 = convolve_fft(x, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
c2 = convolve(x, kernel2, boundary='fill')
c1 = convolve(x, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_gaussian_2d_kernel_quantity():
# Make sure that the angle can be a quantity
kernel1 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=45 * u.deg)
kernel2 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=np.pi / 4)
assert_allclose(kernel1.array, kernel2.array)
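# A minimal usage sketch (not part of the original suite) of the pattern the
# tests above rely on: direct and FFT convolution of a delta image should
# agree to high precision. All names used here are imported above.
def _example_delta_convolution_agreement():
    img = np.zeros((15, 15))
    img[7, 7] = 1.0  # single positive pixel
    kern = Gaussian2DKernel(2, x_size=9, y_size=9)
    c_direct = convolve(img, kern, boundary='fill')
    c_fft = convolve_fft(img, kern, boundary='fill')
    assert_almost_equal(c_direct, c_fft, decimal=12)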
|
014f37139b8e0c333e107a21dffcffae1be054109a16871a411cda82246ef5b4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import (AiryDisk2DKernel, Box1DKernel, Box2DKernel, CustomKernel,
Gaussian1DKernel, Gaussian2DKernel, Kernel1D, Kernel2D,
Model1DKernel, Model2DKernel, RickerWavelet1DKernel,
RickerWavelet2DKernel, Ring2DKernel, Tophat2DKernel,
Trapezoid1DKernel, TrapezoidDisk2DKernel)
from astropy.convolution.utils import KernelSizeError
from astropy.modeling.models import Box2D, Gaussian1D, Gaussian2D
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import AstropyUserWarning
WIDTHS_ODD = [3, 5, 7, 9]
WIDTHS_EVEN = [2, 4, 8, 16]
MODES = ['center', 'linear_interp', 'oversample', 'integrate']
KERNEL_TYPES = [Gaussian1DKernel, Gaussian2DKernel,
Box1DKernel, Box2DKernel,
Trapezoid1DKernel, TrapezoidDisk2DKernel,
RickerWavelet1DKernel, Tophat2DKernel, AiryDisk2DKernel,
Ring2DKernel]
NUMS = [1, 1., np.float32(1.), np.float64(1.)]
# Test data
delta_pulse_1D = np.zeros(81)
delta_pulse_1D[40] = 1
delta_pulse_2D = np.zeros((81, 81))
delta_pulse_2D[40, 40] = 1
random_data_1D = np.random.rand(61)
random_data_2D = np.random.rand(61, 61)
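# Added note: convolving with the delta pulses above returns the kernel's own
# response (the impulse response), so direct and FFT convolution can be
# compared on the kernels themselves; the random arrays exercise generic data.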
class TestKernels:
"""
Test class for the built-in convolution kernels.
"""
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_scipy_filter_gaussian(self, width):
"""
Test GaussianKernel against SciPy ndimage gaussian filter.
"""
from scipy.ndimage import gaussian_filter
gauss_kernel_1D = Gaussian1DKernel(width)
gauss_kernel_1D.normalize()
gauss_kernel_2D = Gaussian2DKernel(width)
gauss_kernel_2D.normalize()
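        # Added note: the kernels' first argument is the Gaussian standard
        # deviation, which matches scipy's ``sigma`` parameter used below.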
astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary='fill')
astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary='fill')
scipy_1D = gaussian_filter(delta_pulse_1D, width)
scipy_2D = gaussian_filter(delta_pulse_2D, width)
assert_almost_equal(astropy_1D, scipy_1D, decimal=12)
assert_almost_equal(astropy_2D, scipy_2D, decimal=12)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_scipy_filter_gaussian_laplace(self, width):
"""
Test RickerWavelet kernels against SciPy ndimage gaussian laplace filters.
"""
from scipy.ndimage import gaussian_laplace
ricker_kernel_1D = RickerWavelet1DKernel(width)
ricker_kernel_2D = RickerWavelet2DKernel(width)
astropy_1D = convolve(delta_pulse_1D, ricker_kernel_1D, boundary='fill', normalize_kernel=False)
astropy_2D = convolve(delta_pulse_2D, ricker_kernel_2D, boundary='fill', normalize_kernel=False)
with pytest.raises(Exception) as exc:
astropy_1D = convolve(delta_pulse_1D, ricker_kernel_1D, boundary='fill', normalize_kernel=True)
assert 'sum is close to zero' in exc.value.args[0]
with pytest.raises(Exception) as exc:
astropy_2D = convolve(delta_pulse_2D, ricker_kernel_2D, boundary='fill', normalize_kernel=True)
assert 'sum is close to zero' in exc.value.args[0]
# The Laplace of Gaussian filter is an inverted Ricker Wavelet filter.
scipy_1D = -gaussian_laplace(delta_pulse_1D, width)
scipy_2D = -gaussian_laplace(delta_pulse_2D, width)
# There is a slight deviation in the normalization. They differ by a
# factor of ~1.0000284132604045. The reason is not known.
assert_almost_equal(astropy_1D, scipy_1D, decimal=5)
assert_almost_equal(astropy_2D, scipy_2D, decimal=5)
@pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))
def test_delta_data(self, kernel_type, width):
"""
Test smoothing of an image with a single positive pixel
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(width)
else:
kernel = kernel_type(width, width * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))
def test_random_data(self, kernel_type, width):
"""
Test smoothing of an image made of random noise
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(width)
else:
kernel = kernel_type(width, width * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(random_data_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(random_data_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(random_data_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(random_data_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_uniform_smallkernel(self, width):
"""
        Test smoothing of an image with a single positive pixel.
        Instead of using a kernel class, uses a simple, small kernel array.
"""
kernel = np.ones([width, width])
c2 = convolve_fft(delta_pulse_2D, kernel, boundary='fill')
c1 = convolve(delta_pulse_2D, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_smallkernel_vs_Box2DKernel(self, width):
"""
        Test smoothing of an image with a single positive pixel.
        Compares a small uniform kernel array to the equivalent Box2DKernel.
"""
kernel1 = np.ones([width, width]) / width ** 2
kernel2 = Box2DKernel(width)
c2 = convolve_fft(delta_pulse_2D, kernel2, boundary='fill')
c1 = convolve_fft(delta_pulse_2D, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_convolve_1D_kernels(self):
"""
Check if convolving two kernels with each other works correctly.
"""
gauss_1 = Gaussian1DKernel(3)
gauss_2 = Gaussian1DKernel(4)
test_gauss_3 = Gaussian1DKernel(5)
with pytest.warns(AstropyUserWarning, match=r'Both array and kernel '
r'are Kernel instances'):
gauss_3 = convolve(gauss_1, gauss_2)
assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)
def test_convolve_2D_kernels(self):
"""
Check if convolving two kernels with each other works correctly.
"""
gauss_1 = Gaussian2DKernel(3)
gauss_2 = Gaussian2DKernel(4)
test_gauss_3 = Gaussian2DKernel(5)
with pytest.warns(AstropyUserWarning, match=r'Both array and kernel '
r'are Kernel instances'):
gauss_3 = convolve(gauss_1, gauss_2)
assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)
@pytest.mark.parametrize(('number'), NUMS)
def test_multiply_scalar(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = number * gauss
assert_almost_equal(gauss_new.array, gauss.array * number, decimal=12)
@pytest.mark.parametrize(('number'), NUMS)
def test_multiply_scalar_type(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = number * gauss
assert type(gauss_new) is Gaussian1DKernel
@pytest.mark.parametrize(('number'), NUMS)
def test_rmultiply_scalar_type(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = gauss * number
assert type(gauss_new) is Gaussian1DKernel
def test_multiply_kernel1d(self):
"""Test that multiplying two 1D kernels raises an exception."""
gauss = Gaussian1DKernel(3)
with pytest.raises(Exception):
gauss * gauss
def test_multiply_kernel2d(self):
"""Test that multiplying two 2D kernels raises an exception."""
gauss = Gaussian2DKernel(3)
with pytest.raises(Exception):
gauss * gauss
def test_multiply_kernel1d_kernel2d(self):
"""
Test that multiplying a 1D kernel with a 2D kernel raises an
exception.
"""
with pytest.raises(Exception):
Gaussian1DKernel(3) * Gaussian2DKernel(3)
def test_add_kernel_scalar(self):
"""Test that adding a scalar to a kernel raises an exception."""
with pytest.raises(Exception):
Gaussian1DKernel(3) + 1
def test_model_1D_kernel(self):
"""
        Check Model1DKernel against Gaussian1DKernel
"""
stddev = 5.
gauss = Gaussian1D(1. / np.sqrt(2 * np.pi * stddev**2), 0, stddev)
model_gauss_kernel = Model1DKernel(gauss, x_size=21)
model_gauss_kernel.normalize()
gauss_kernel = Gaussian1DKernel(stddev, x_size=21)
assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,
decimal=12)
def test_model_2D_kernel(self):
"""
        Check Model2DKernel against Gaussian2DKernel
"""
stddev = 5.
gauss = Gaussian2D(1. / (2 * np.pi * stddev**2), 0, 0, stddev, stddev)
model_gauss_kernel = Model2DKernel(gauss, x_size=21)
model_gauss_kernel.normalize()
gauss_kernel = Gaussian2DKernel(stddev, x_size=21)
assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,
decimal=12)
def test_custom_1D_kernel(self):
"""
Check CustomKernel against Box1DKernel.
"""
# Define one dimensional array:
array = np.ones(5)
custom = CustomKernel(array)
custom.normalize()
box = Box1DKernel(5)
c2 = convolve(delta_pulse_1D, custom, boundary='fill')
c1 = convolve(delta_pulse_1D, box, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_custom_2D_kernel(self):
"""
Check CustomKernel against Box2DKernel.
"""
        # Define two dimensional array:
array = np.ones((5, 5))
custom = CustomKernel(array)
custom.normalize()
box = Box2DKernel(5)
c2 = convolve(delta_pulse_2D, custom, boundary='fill')
c1 = convolve(delta_pulse_2D, box, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_custom_1D_kernel_list(self):
"""
Check if CustomKernel works with lists.
"""
custom = CustomKernel([1, 1, 1, 1, 1])
assert custom.is_bool is True
def test_custom_2D_kernel_list(self):
"""
Check if CustomKernel works with lists.
"""
custom = CustomKernel([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
assert custom.is_bool is True
def test_custom_1D_kernel_zerosum(self):
"""
Check if CustomKernel works when the input array/list
sums to zero.
"""
array = [-2, -1, 0, 1, 2]
custom = CustomKernel(array)
with pytest.warns(AstropyUserWarning, match=r'kernel cannot be '
r'normalized because it sums to zero'):
custom.normalize()
assert custom.truncation == 1.
assert custom._kernel_sum == 0.
def test_custom_2D_kernel_zerosum(self):
"""
Check if CustomKernel works when the input array/list
sums to zero.
"""
array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]]
custom = CustomKernel(array)
with pytest.warns(AstropyUserWarning, match=r'kernel cannot be '
r'normalized because it sums to zero'):
custom.normalize()
assert custom.truncation == 1.
assert custom._kernel_sum == 0.
def test_custom_kernel_odd_error(self):
"""
        Check that CustomKernel raises an error if the array size is even.
"""
with pytest.raises(KernelSizeError):
CustomKernel([1, 1, 1, 1])
def test_add_1D_kernels(self):
"""
        Check if adding two 1D kernels works.
"""
box_1 = Box1DKernel(5)
box_2 = Box1DKernel(3)
box_3 = Box1DKernel(1)
box_sum_1 = box_1 + box_2 + box_3
box_sum_2 = box_2 + box_3 + box_1
box_sum_3 = box_3 + box_1 + box_2
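        # Added note: kernels of different sizes are zero-padded to a common
        # shape and summed elementwise, which is what the reference below
        # encodes.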
ref = [1/5., 1/5. + 1/3., 1 + 1/3. + 1/5., 1/5. + 1/3., 1/5.]
assert_almost_equal(box_sum_1.array, ref, decimal=12)
assert_almost_equal(box_sum_2.array, ref, decimal=12)
assert_almost_equal(box_sum_3.array, ref, decimal=12)
# Assert that the kernels haven't changed
assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12)
assert_almost_equal(box_2.array, [1/3., 1/3., 1/3.], decimal=12)
assert_almost_equal(box_3.array, [1], decimal=12)
def test_add_2D_kernels(self):
"""
        Check if adding two 2D kernels works.
"""
box_1 = Box2DKernel(3)
box_2 = Box2DKernel(1)
box_sum_1 = box_1 + box_2
box_sum_2 = box_2 + box_1
ref = [[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 + 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.]]
ref_1 = [[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.]]
assert_almost_equal(box_2.array, [[1]], decimal=12)
assert_almost_equal(box_1.array, ref_1, decimal=12)
assert_almost_equal(box_sum_1.array, ref, decimal=12)
assert_almost_equal(box_sum_2.array, ref, decimal=12)
def test_Gaussian1DKernel_even_size(self):
"""
        Check that an even size works for Gaussian1DKernel.
"""
gauss = Gaussian1DKernel(3, x_size=10)
assert gauss.array.size == 10
def test_Gaussian2DKernel_even_size(self):
"""
        Check that an even size works for Gaussian2DKernel.
"""
gauss = Gaussian2DKernel(3, x_size=10, y_size=10)
assert gauss.array.shape == (10, 10)
# https://github.com/astropy/astropy/issues/3605
def test_Gaussian2DKernel_rotated(self):
gauss = Gaussian2DKernel(
x_stddev=3, y_stddev=1.5, theta=0.7853981633974483,
x_size=5, y_size=5) # rotated 45 deg ccw
ans = [[0.04087193, 0.04442386, 0.03657381, 0.02280797, 0.01077372],
[0.04442386, 0.05704137, 0.05547869, 0.04087193, 0.02280797],
[0.03657381, 0.05547869, 0.06374482, 0.05547869, 0.03657381],
[0.02280797, 0.04087193, 0.05547869, 0.05704137, 0.04442386],
[0.01077372, 0.02280797, 0.03657381, 0.04442386, 0.04087193]]
assert_allclose(gauss, ans, rtol=0.001) # Rough comparison at 0.1 %
def test_normalize_peak(self):
"""
Check if normalize works with peak mode.
"""
custom = CustomKernel([1, 2, 3, 2, 1])
custom.normalize(mode='peak')
assert custom.array.max() == 1
def test_check_kernel_attributes(self):
"""
Check if kernel attributes are correct.
"""
box = Box2DKernel(5)
# Check truncation
assert box.truncation == 0
# Check model
assert isinstance(box.model, Box2D)
# Check center
assert box.center == [2, 2]
# Check normalization
box.normalize()
assert_almost_equal(box._kernel_sum, 1., decimal=12)
# Check separability
assert box.separable
@pytest.mark.parametrize(('kernel_type', 'mode'), list(itertools.product(KERNEL_TYPES, MODES)))
def test_discretize_modes(self, kernel_type, mode):
"""
Check if the different modes result in kernels that work with convolve.
Use only small kernel width, to make the test pass quickly.
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(3)
else:
kernel = kernel_type(3, 3 * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_EVEN)
def test_box_kernels_even_size(self, width):
"""
        Check that Box kernels work properly with even sizes.
"""
kernel_1D = Box1DKernel(width)
assert kernel_1D.shape[0] % 2 != 0
assert kernel_1D.array.sum() == 1.
kernel_2D = Box2DKernel(width)
assert np.all([_ % 2 != 0 for _ in kernel_2D.shape])
assert kernel_2D.array.sum() == 1.
def test_kernel_normalization(self):
"""
Test that repeated normalizations do not change the kernel [#3747].
"""
kernel = CustomKernel(np.ones(5))
kernel.normalize()
data = np.copy(kernel.array)
kernel.normalize()
assert_allclose(data, kernel.array)
kernel.normalize()
assert_allclose(data, kernel.array)
def test_kernel_normalization_mode(self):
"""
Test that an error is raised if mode is invalid.
"""
with pytest.raises(ValueError):
kernel = CustomKernel(np.ones(3))
kernel.normalize(mode='invalid')
def test_kernel1d_int_size(self):
"""
Test that an error is raised if ``Kernel1D`` ``x_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian1DKernel(3, x_size=1.2)
def test_kernel2d_int_xsize(self):
"""
Test that an error is raised if ``Kernel2D`` ``x_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian2DKernel(3, x_size=1.2)
def test_kernel2d_int_ysize(self):
"""
Test that an error is raised if ``Kernel2D`` ``y_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian2DKernel(3, x_size=5, y_size=1.2)
def test_kernel1d_initialization(self):
"""
Test that an error is raised if an array or model is not
specified for ``Kernel1D``.
"""
with pytest.raises(TypeError):
Kernel1D()
def test_kernel2d_initialization(self):
"""
Test that an error is raised if an array or model is not
specified for ``Kernel2D``.
"""
with pytest.raises(TypeError):
Kernel2D()
def test_array_keyword_not_allowed(self):
"""
Regression test for issue #10439
"""
x = np.ones([10, 10])
with pytest.raises(TypeError, match=r".* allowed .*"):
AiryDisk2DKernel(2, array=x)
Box1DKernel(2, array=x)
Box2DKernel(2, array=x)
Gaussian1DKernel(2, array=x)
Gaussian2DKernel(2, array=x)
RickerWavelet1DKernel(2, array=x)
RickerWavelet2DKernel(2, array=x)
Model1DKernel(Gaussian1D(1, 0, 2), array=x)
Model2DKernel(Gaussian2D(1, 0, 0, 2, 2), array=x)
Ring2DKernel(9, 8, array=x)
Tophat2DKernel(2, array=x)
Trapezoid1DKernel(2, array=x)
Trapezoid1DKernel(2, array=x)
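# A short sketch (not part of the original suite) of the two normalize()
# modes exercised above: 'integral' (the default) scales the kernel to unit
# sum, while 'peak' scales it to unit maximum.
def _example_normalize_modes():
    k = CustomKernel([1., 2., 3., 2., 1.])
    k.normalize()  # mode='integral'
    assert_almost_equal(k.array.sum(), 1., decimal=12)
    k.normalize(mode='peak')
    assert k.array.max() == 1.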
|
009485797fd80b8658d732d64a22be481fefb067b22a67cab6b781bf863d1b93 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import sys
import subprocess
import pytest
from astropy.config import (configuration, set_temp_config, paths,
create_config_file)
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning
OLD_CONFIG = {}
def setup_module():
OLD_CONFIG.clear()
OLD_CONFIG.update(configuration._cfgobjs)
def teardown_module():
configuration._cfgobjs.clear()
configuration._cfgobjs.update(OLD_CONFIG)
def test_paths():
assert 'astropy' in paths.get_config_dir()
assert 'astropy' in paths.get_cache_dir()
assert 'testpkg' in paths.get_config_dir(rootname='testpkg')
assert 'testpkg' in paths.get_cache_dir(rootname='testpkg')
def test_set_temp_config(tmpdir, monkeypatch):
# Check that we start in an understood state.
assert configuration._cfgobjs == OLD_CONFIG
# Temporarily remove any temporary overrides of the configuration dir.
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
orig_config_dir = paths.get_config_dir(rootname='astropy')
temp_config_dir = str(tmpdir.mkdir('config'))
temp_astropy_config = os.path.join(temp_config_dir, 'astropy')
# Test decorator mode
@paths.set_temp_config(temp_config_dir)
def test_func():
assert paths.get_config_dir(rootname='astropy') == temp_astropy_config
# Test temporary restoration of original default
with paths.set_temp_config() as d:
assert d == orig_config_dir == paths.get_config_dir(rootname='astropy')
test_func()
# Test context manager mode (with cleanup)
with paths.set_temp_config(temp_config_dir, delete=True):
assert paths.get_config_dir(rootname='astropy') == temp_astropy_config
assert not os.path.exists(temp_config_dir)
# Check that we have returned to our old configuration.
assert configuration._cfgobjs == OLD_CONFIG
def test_set_temp_cache(tmpdir, monkeypatch):
monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None)
orig_cache_dir = paths.get_cache_dir(rootname='astropy')
temp_cache_dir = str(tmpdir.mkdir('cache'))
temp_astropy_cache = os.path.join(temp_cache_dir, 'astropy')
# Test decorator mode
@paths.set_temp_cache(temp_cache_dir)
def test_func():
assert paths.get_cache_dir(rootname='astropy') == temp_astropy_cache
# Test temporary restoration of original default
with paths.set_temp_cache() as d:
assert d == orig_cache_dir == paths.get_cache_dir(rootname='astropy')
test_func()
# Test context manager mode (with cleanup)
with paths.set_temp_cache(temp_cache_dir, delete=True):
assert paths.get_cache_dir(rootname='astropy') == temp_astropy_cache
assert not os.path.exists(temp_cache_dir)
def test_set_temp_cache_resets_on_exception(tmpdir):
"""Test for regression of bug #9704"""
t = paths.get_cache_dir()
a = tmpdir / 'a'
with open(a, 'wt') as f:
f.write("not a good cache\n")
with pytest.raises(OSError):
with paths.set_temp_cache(a):
pass
assert t == paths.get_cache_dir()
def test_config_file():
from astropy.config.configuration import get_config, reload_config
apycfg = get_config('astropy')
assert apycfg.filename.endswith('astropy.cfg')
cfgsec = get_config('astropy.config')
assert cfgsec.depth == 1
assert cfgsec.name == 'config'
assert cfgsec.parent.filename.endswith('astropy.cfg')
# try with a different package name, still inside astropy config dir:
testcfg = get_config('testpkg', rootname='astropy')
parts = os.path.normpath(testcfg.filename).split(os.sep)
assert '.astropy' in parts or 'astropy' in parts
assert parts[-1] == 'testpkg.cfg'
configuration._cfgobjs['testpkg'] = None # HACK
# try with a different package name, no specified root name (should
# default to astropy):
testcfg = get_config('testpkg')
parts = os.path.normpath(testcfg.filename).split(os.sep)
assert '.astropy' in parts or 'astropy' in parts
assert parts[-1] == 'testpkg.cfg'
configuration._cfgobjs['testpkg'] = None # HACK
# try with a different package name, specified root name:
testcfg = get_config('testpkg', rootname='testpkg')
parts = os.path.normpath(testcfg.filename).split(os.sep)
assert '.testpkg' in parts or 'testpkg' in parts
assert parts[-1] == 'testpkg.cfg'
configuration._cfgobjs['testpkg'] = None # HACK
# try with a subpackage with specified root name:
testcfg_sec = get_config('testpkg.somemodule', rootname='testpkg')
parts = os.path.normpath(testcfg_sec.parent.filename).split(os.sep)
assert '.testpkg' in parts or 'testpkg' in parts
assert parts[-1] == 'testpkg.cfg'
configuration._cfgobjs['testpkg'] = None # HACK
reload_config('astropy')
def check_config(conf):
# test that the output contains some lines that we expect
assert '# unicode_output = False' in conf
assert '[io.fits]' in conf
assert '[table]' in conf
assert '# replace_warnings = ,' in conf
assert '[table.jsviewer]' in conf
assert '# css_urls = https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css,' in conf
assert '[visualization.wcsaxes]' in conf
assert '## Whether to log exceptions before raising them.' in conf
assert '# log_exceptions = False' in conf
def test_generate_config(tmp_path):
from astropy.config.configuration import generate_config
out = io.StringIO()
generate_config('astropy', out)
conf = out.getvalue()
outfile = tmp_path / 'astropy.cfg'
generate_config('astropy', outfile)
with open(outfile) as fp:
conf2 = fp.read()
for c in (conf, conf2):
check_config(c)
def test_generate_config2(tmp_path):
"""Test that generate_config works with the default filename."""
with set_temp_config(tmp_path):
from astropy.config.configuration import generate_config
generate_config('astropy')
assert os.path.exists(tmp_path / 'astropy' / 'astropy.cfg')
with open(tmp_path / 'astropy' / 'astropy.cfg') as fp:
conf = fp.read()
check_config(conf)
def test_create_config_file(tmp_path, caplog):
with set_temp_config(tmp_path):
create_config_file('astropy')
# check that the config file has been created
assert ('The configuration file has been successfully written'
in caplog.records[0].message)
assert os.path.exists(tmp_path / 'astropy' / 'astropy.cfg')
with open(tmp_path / 'astropy' / 'astropy.cfg') as fp:
conf = fp.read()
check_config(conf)
caplog.clear()
# now modify the config file
conf = conf.replace('# unicode_output = False', 'unicode_output = True')
with open(tmp_path / 'astropy' / 'astropy.cfg', mode='w') as fp:
fp.write(conf)
with set_temp_config(tmp_path):
create_config_file('astropy')
# check that the config file has not been overwritten since it was modified
assert ('The configuration file already exists and seems to have been '
'customized' in caplog.records[0].message)
caplog.clear()
with set_temp_config(tmp_path):
create_config_file('astropy', overwrite=True)
# check that the config file has been overwritten
assert ('The configuration file has been successfully written'
in caplog.records[0].message)
def test_configitem():
from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config
ci = ConfigItem(34, 'this is a Description')
class Conf(ConfigNamespace):
tstnm = ci
conf = Conf()
assert ci.module == 'astropy.config.tests.test_configs'
assert ci() == 34
assert ci.description == 'this is a Description'
assert conf.tstnm == 34
sec = get_config(ci.module)
assert sec['tstnm'] == 34
ci.description = 'updated Descr'
ci.set(32)
assert ci() == 32
# It's useful to go back to the default to allow other test functions to
# call this one and still be in the default configuration.
ci.description = 'this is a Description'
ci.set(34)
assert ci() == 34
# Test iterator for one-item namespace
result = [x for x in conf]
assert result == ['tstnm']
result = [x for x in conf.keys()]
assert result == ['tstnm']
result = [x for x in conf.values()]
assert result == [ci]
result = [x for x in conf.items()]
assert result == [('tstnm', ci)]
def test_configitem_types():
from astropy.config.configuration import ConfigNamespace, ConfigItem
ci1 = ConfigItem(34)
ci2 = ConfigItem(34.3)
ci3 = ConfigItem(True)
ci4 = ConfigItem('astring')
class Conf(ConfigNamespace):
tstnm1 = ci1
tstnm2 = ci2
tstnm3 = ci3
tstnm4 = ci4
conf = Conf()
assert isinstance(conf.tstnm1, int)
assert isinstance(conf.tstnm2, float)
assert isinstance(conf.tstnm3, bool)
assert isinstance(conf.tstnm4, str)
with pytest.raises(TypeError):
conf.tstnm1 = 34.3
    conf.tstnm2 = 12  # this should succeed, since ints up-cast to float
with pytest.raises(TypeError):
conf.tstnm3 = 'fasd'
with pytest.raises(TypeError):
conf.tstnm4 = 546.245
# Test iterator for multi-item namespace. Assume ordered by insertion order.
item_names = [x for x in conf]
assert item_names == ['tstnm1', 'tstnm2', 'tstnm3', 'tstnm4']
result = [x for x in conf.keys()]
assert result == item_names
result = [x for x in conf.values()]
assert result == [ci1, ci2, ci3, ci4]
result = [x for x in conf.items()]
assert result == [('tstnm1', ci1), ('tstnm2', ci2), ('tstnm3', ci3), ('tstnm4', ci4)]
def test_configitem_options(tmpdir):
from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config
cio = ConfigItem(['op1', 'op2', 'op3'])
class Conf(ConfigNamespace):
tstnmo = cio
conf = Conf() # noqa
sec = get_config(cio.module)
assert isinstance(cio(), str)
assert cio() == 'op1'
assert sec['tstnmo'] == 'op1'
cio.set('op2')
with pytest.raises(TypeError):
cio.set('op5')
assert sec['tstnmo'] == 'op2'
# now try saving
apycfg = sec
while apycfg.parent is not apycfg:
apycfg = apycfg.parent
f = tmpdir.join('astropy.cfg')
with open(f.strpath, 'wb') as fd:
apycfg.write(fd)
with open(f.strpath, encoding='utf-8') as fd:
        lns = [x.strip() for x in fd.readlines()]
assert 'tstnmo = op2' in lns
def test_config_noastropy_fallback(monkeypatch):
"""
Tests to make sure configuration items fall back to their defaults when
there's a problem accessing the astropy directory
"""
# make sure the config directory is not searched
monkeypatch.setenv('XDG_CONFIG_HOME', 'foo')
monkeypatch.delenv('XDG_CONFIG_HOME')
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
# make sure the _find_or_create_root_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError
monkeypatch.setattr(paths, '_find_or_create_root_dir', osraiser)
# also have to make sure the stored configuration objects are cleared
monkeypatch.setattr(configuration, '_cfgobjs', {})
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_config_dir(rootname='astropy')
# now run the basic tests, and make sure the warning about no astropy
# is present
test_configitem()
def test_configitem_setters():
from astropy.config.configuration import ConfigNamespace, ConfigItem
class Conf(ConfigNamespace):
tstnm12 = ConfigItem(42, 'this is another Description')
conf = Conf()
assert conf.tstnm12 == 42
with conf.set_temp('tstnm12', 45):
assert conf.tstnm12 == 45
assert conf.tstnm12 == 42
conf.tstnm12 = 43
assert conf.tstnm12 == 43
with conf.set_temp('tstnm12', 46):
assert conf.tstnm12 == 46
# Make sure it is reset even with Exception
try:
with conf.set_temp('tstnm12', 47):
raise Exception
except Exception:
pass
assert conf.tstnm12 == 43
def test_empty_config_file():
from astropy.config.configuration import is_unedited_config_file
def get_content(fn):
with open(get_pkg_data_filename(fn), encoding='latin-1') as fd:
return fd.read()
content = get_content('data/empty.cfg')
assert is_unedited_config_file(content)
content = get_content('data/not_empty.cfg')
assert not is_unedited_config_file(content)
class TestAliasRead:
def setup_class(self):
configuration._override_config_file = get_pkg_data_filename('data/alias.cfg')
def test_alias_read(self):
from astropy.utils.data import conf
with pytest.warns(
AstropyDeprecationWarning,
match=r"Config parameter 'name_resolve_timeout' in section "
r"\[coordinates.name_resolve\].*") as w:
conf.reload()
assert conf.remote_timeout == 42
assert len(w) == 1
def teardown_class(self):
from astropy.utils.data import conf
configuration._override_config_file = None
conf.reload()
def test_configitem_unicode(tmpdir):
from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config
cio = ConfigItem('ასტრონომიის')
class Conf(ConfigNamespace):
tstunicode = cio
conf = Conf() # noqa
sec = get_config(cio.module)
assert isinstance(cio(), str)
assert cio() == 'ასტრონომიის'
assert sec['tstunicode'] == 'ასტრონომიის'
def test_warning_move_to_top_level():
# Check that the warning about deprecation config items in the
# file works. See #2514
from astropy import conf
configuration._override_config_file = get_pkg_data_filename('data/deprecated.cfg')
try:
with pytest.warns(AstropyDeprecationWarning) as w:
conf.reload()
conf.max_lines
assert len(w) == 1
finally:
configuration._override_config_file = None
conf.reload()
def test_no_home():
# "import astropy" fails when neither $HOME or $XDG_CONFIG_HOME
# are set. To test, we unset those environment variables for a
# subprocess and try to import astropy.
test_path = os.path.dirname(__file__)
astropy_path = os.path.abspath(
os.path.join(test_path, '..', '..', '..'))
env = os.environ.copy()
paths = [astropy_path]
if env.get('PYTHONPATH'):
paths.append(env.get('PYTHONPATH'))
env['PYTHONPATH'] = os.pathsep.join(paths)
for val in ['HOME', 'XDG_CONFIG_HOME']:
if val in env:
del env[val]
retcode = subprocess.check_call(
[sys.executable, '-c', 'import astropy'],
env=env)
assert retcode == 0
|
2489af990e66d1af3a2f711ed276b6d7f599080f270bfa9141d9908f056aecd3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
# pylint: disable=invalid-name
import os.path
import unittest.mock as mk
from importlib.metadata import EntryPoint
from itertools import combinations
from unittest import mock
import numpy as np
import pytest
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import (
DogBoxLSQFitter, Fitter, FittingWithOutlierRemoval, JointFitter, LevMarLSQFitter,
LinearLSQFitter, LMLSQFitter, NonFiniteValueError, SimplexLSQFitter, SLSQPLSQFitter,
TRFLSQFitter, _NLLSQFitter, populate_entry_points)
from astropy.modeling.optimizers import Optimization
from astropy.stats import sigma_clip
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from . import irafutil
if HAS_SCIPY:
from scipy import optimize
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
non_linear_fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomial fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
self.z = poly2(self.x, self.y)
def test_poly2D_fitting(self):
fitter = LinearLSQFitter()
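        # Added note: ``fit_deriv`` evaluated on the grid gives the design
        # matrix of the linear problem, so an independent lstsq solve yields
        # reference coefficients for the fitter to match.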
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
fitter = LinearLSQFitter()
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_nonlinear_fitting(self, fitter):
fitter = fitter()
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
@pytest.mark.skipif('not HAS_SCIPY')
def test_compare_nonlinear_fitting(self):
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
fit_models = []
for fitter in non_linear_fitters:
fitter = fitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_models.append(fitter(self.model, self.x, self.y, self.z))
for pair in combinations(fit_models, 2):
assert_allclose(pair[0].parameters, pair[1].parameters)
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
Fit z using a ICheb2D model
Evaluate the ICheb2D polynomial and compare with the initial z
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
128.])
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting_with_weights(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
weights = np.ones_like(self.y)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
"""
Tests the joint fitting routine using 2 gaussian models
"""
def setup_class(self):
"""
Create 2 gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
self.jf = JointFitter([self.g1, self.g2],
{self.g1: ['amplitude'],
self.g2: ['amplitude']}, [9.8])
self.x = np.arange(10, 20, .1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
Tests the fitting routine with similar procedure.
Compares the fitted parameters.
"""
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
model(p[0], p[3:], x2) - y2])
coeff, _ = optimize.leastsq(errfunc, p,
args=(self.x, self.ny1, self.x, self.ny2))
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
with pytest.raises(ValueError) as excinfo:
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
_ = fitter(init_model_comp, x, y)
assert "Model must be simple, not compound" in str(excinfo.value)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join('data',
'idcompspec.fits'))
with open(test_file) as f:
lines = f.read()
reclist = lines.split('begin')
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields['order'])
initial_model = models.Chebyshev1D(order - 1,
domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs),
rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5*x*x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5*x*x, -2*x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
y[0, 7] = 100. # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
mask=np.zeros_like([x, x]))
z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4. * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
@pytest.mark.parametrize('fitter0', non_linear_fitters)
@pytest.mark.parametrize('fitter1', non_linear_fitters)
def test_estimated_vs_analytic_deriv(self, fitter0, fitter1):
"""
        Runs each pair of non-linear fitters with estimated and analytic
        derivatives of a `Gaussian1D` and compares the results.
"""
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize('fitter0', non_linear_fitters)
@pytest.mark.parametrize('fitter1', non_linear_fitters)
def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1):
"""
        Runs each pair of non-linear fitters with estimated and analytic
        derivatives of a `Gaussian1D`, using weights.
"""
weights = 1.0 / (self.ydata / 10.)
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_with_optimize(self, fitter):
"""
        Tests results from the non-linear fitters against
        `scipy.optimize.leastsq`.
"""
fitter = fitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(errfunc, self.initial_values,
args=(self.xdata, self.ydata))
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_with_weights(self, fitter):
"""
        Tests results from the non-linear fitters with weights.
"""
fitter = fitter()
# part 1: weights are equal to 1
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=np.ones_like(self.xdata))
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.
mask = weights >= 1.
model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=weights)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(r'ignore:.* Maximum number of iterations reached')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter_class', fitters)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_fitter_against_LevMar(self, fitter_class, fitter):
"""
        Tests results from `SimplexLSQFitter` and `SLSQPLSQFitter` against
        the non-linear least-squares fitters.
"""
fitter = fitter()
fitter_cls = fitter_class()
# This emits a warning from fitter that we need to ignore with
# pytest.mark.filterwarnings above.
new_model = fitter_cls(self.gauss, self.xdata, self.ydata)
model = fitter(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters,
rtol=10 ** (-4))
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_LSQ_SLSQP_with_constraints(self, fitter):
"""
Runs `LevMarLSQFitter`/`TRFLSQFitter` and `SLSQPLSQFitter` on a
model with constraints.
"""
fitter = fitter()
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fslsqp = SLSQPLSQFitter()
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters,
rtol=10 ** (-4))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_non_linear_lsq_fitter_with_weights(self, fitter):
"""
Tests that issue #11581 has been solved.
"""
fitter = fitter()
np.random.seed(42)
norder = 2
fitter2 = LinearLSQFitter()
model = models.Polynomial1D(norder)
npts = 10000
c = [2.0, -10.0, 7.0]
tw = np.random.uniform(0.0, 10.0, npts)
tx = np.random.uniform(0.0, 10.0, npts)
ty = c[0] + c[1] * tx + c[2] * (tx ** 2)
ty += np.random.normal(0.0, 1.5, npts)
with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):
tf1 = fitter(model, tx, ty, weights=tw)
tf2 = fitter2(model, tx, ty, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters,
atol=10 ** (-16))
assert_allclose(tf1.parameters, c,
rtol=10 ** (-2), atol=10 ** (-2))
model = models.Gaussian1D()
if isinstance(fitter, TRFLSQFitter) or isinstance(fitter, LMLSQFitter):
with pytest.warns(AstropyUserWarning, match=r'The fit may be unsuccessful; *.'):
fitter(model, tx, ty, weights=tw)
else:
fitter(model, tx, ty, weights=tw)
model = models.Polynomial2D(norder)
nxpts = 100
nypts = 150
npts = nxpts * nypts
c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]
tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tz = c[0] + c[1] * tx + c[2] * (tx ** 2) + c[3] * ty + c[4] * (ty ** 2) + c[5] * tx * ty
tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)
with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):
tf1 = fitter(model, tx, ty, tz, weights=tw)
tf2 = fitter2(model, tx, ty, tz, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters,
atol=10 ** (-16))
assert_allclose(tf1.parameters, c,
rtol=10 ** (-2), atol=10 ** (-2))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x ** 2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0., 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_param_cov(self, fitter):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
fitter = fitter()
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x*a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
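            # Added note: for y = X @ beta + noise, the OLS solution is
            # beta = (X^T X)^{-1} X^T y with covariance
            # cov(beta) = (X^T X)^{-1} * s^2, where s^2 is the residual
            # variance sum(r**2) / (n - p).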
X = np.vstack([x, np.ones(len(x))]).T
beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
s2 = (np.sum((y - np.matmul(X, beta).ravel())**2) /
(len(y) - len(beta)))
olscov = np.linalg.inv(np.matmul(X.T, X)) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(olscov, fitter.fit_info['param_cov'])
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
        # This should fail, as it raises an ImportError
raise ImportError
def returnbadfunc(self):
def badfunc():
            # This should import, but it should fail the type check
pass
return badfunc
def returnbadclass(self):
        # This should import, but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
with pytest.warns(AstropyUserWarning, match=r".*ImportError.*"):
populate_entry_points([mock_entry_importerror])
def test_bad_func(self):
"""This returns a function which fails the type check"""
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
with pytest.warns(AstropyUserWarning, match=r".*Class.*"):
populate_entry_points([mock_entry_badfunc])
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter """
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
with pytest.warns(AstropyUserWarning, match=r".*BadClass.*"):
populate_entry_points([mock_entry_badclass])
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5., 5., 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
self.y = func(self.model_params, self.x)
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
y = self.y + (np.random.normal(0., 0.2, self.x.shape) +
c*np.random.normal(3.0, 5.0, self.x.shape))
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
fit = FittingWithOutlierRemoval(fitter, sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
0.5*(pos[1] - p[1])**2 / p[3]**2)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Compute the intensity-weighted centroid of the data as the
        initial guess for the center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
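        # Added note: convert the centroid from world coordinates to array
        # indices, assuming a uniform grid spanning the full extent of x and y.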
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
z = self.z + (np.random.normal(0., 0.2, self.z.shape) +
c*np.random.normal(self.z, 2.0, self.z.shape))
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
y_mean=guess[2], x_stddev=0.75,
y_stddev=1.25)
fit = FittingWithOutlierRemoval(fitter, sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
x = np.arange(10)
y = np.array([2.5*x - 4, 2*x*x + x + 10])
y[1, 5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)
assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)
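# Minimal sketch (not part of the original suite): outlier-resistant fitting
# of a single 1D polynomial, the simplest form of what the model-set tests
# above exercise.
def _example_outlier_removal_fit():
    x = np.arange(10)
    y = 3.0 * x + 1.0
    y[4] = 1000.0  # outlier
    fit = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
                                    niter=3, sigma=3.0)
    model, _ = fit(models.Polynomial1D(1), x, y)
    assert_allclose(model.c1, 3.0, atol=1e-8)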
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)
z[3, 3:5, 0] = 100. # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020 """
def setup_class(self):
# values of x,y not important as we fit y(x,y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0, 0] = 1000.0 # outlier
self.z[0, 1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x1d, self.z1d)
assert((~mask).sum() == self.z1d.size - 2)
assert(mask[0] and mask[1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2)) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""
smoke test for #7020 - fails without the fitting.py
patch because the weights do not propagate
"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_1d_set_with_weights_with_sigma_clip(self):
"""1D model set with separate weights"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
weights = np.array([self.weights1d, self.weights1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=weights)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x, self.y, self.z)
assert((~mask).sum() == self.z.size - 2)
assert(mask[0, 0] and mask[0, 1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_2d_with_weights_without_sigma_clip(self, fitter):
fitter = fitter()
model = models.Polynomial2D(0)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_linear_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
@pytest.mark.parametrize('base_fitter', non_linear_fitters)
def test_2d_with_weights_with_sigma_clip(self, base_fitter):
"""smoke test for #7020 - fails without fitting.py patch because
weights does not propagate"""
base_fitter = base_fitter()
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(base_fitter, sigma_clip,
niter=3, sigma=3.)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
def test_2d_linear_with_weights_with_sigma_clip(self):
"""same as test above with a linear fitter."""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_fitters_with_weights(fitter):
"""Issue #5737 """
fitter = fitter()
if isinstance(fitter, _NLLSQFitter):
pytest.xfail("This test is poorly designed and causes issues for "
"scipy.optimize.least_squares based fitters")
Xin, Yin = np.mgrid[0:21, 0:21]
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights():
"""Regression test for #7035"""
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
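# Weight each point by 1 / zsig**2, in the style of inverse-variance
# weighting (here zsig holds the drawn noise values themselves).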
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights_flat():
"""Same as the above #7035 test but with flattened inputs"""
Xin, Yin = np.mgrid[0:21, 0:21]
Xin, Yin = Xin.flatten(), Yin.flatten()
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_fitters_interface(fitter):
"""
Test that ``**kwargs`` work with all optimizers.
This is a basic smoke test.
"""
fitter = fitter()
model = models.Gaussian1D(10, 4, .3)
x = np.arange(21)
y = model(x)
if isinstance(fitter, SimplexLSQFitter):
kwargs = {'maxiter': 79, 'verblevel': 1, 'acc': 1e-6}
else:
kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
if isinstance(fitter, LevMarLSQFitter) or isinstance(fitter, _NLLSQFitter):
kwargs.pop('verblevel')
_ = fitter(model, x, y, **kwargs)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter_class', [SLSQPLSQFitter, SimplexLSQFitter])
def test_optimizers(fitter_class):
fitter = fitter_class()
# Test maxiter
assert fitter._opt_method.maxiter == 100
fitter._opt_method.maxiter = 1000
assert fitter._opt_method.maxiter == 1000
# Test eps
assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps)
fitter._opt_method.eps = 1e-16
assert fitter._opt_method.eps == 1e-16
# Test acc
assert fitter._opt_method.acc == 1e-7
fitter._opt_method.acc = 1e-16
assert fitter._opt_method.acc == 1e-16
# Test repr
assert repr(fitter._opt_method) == f"{fitter._opt_method.__class__.__name__}()"
fitparams = mk.MagicMock()
final_func_val = mk.MagicMock()
numiter = mk.MagicMock()
funcalls = mk.MagicMock()
exit_mode = 1
mess = mk.MagicMock()
xtol = mk.MagicMock()
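# scipy's fmin_slsqp (full_output) returns (x, fx, its, imode, smode),
# while fmin returns (xopt, fopt, iter, funcalls, warnflag); the mocked
# return tuples below mirror those two shapes.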
if fitter_class == SLSQPLSQFitter:
return_value = (fitparams, final_func_val, numiter, exit_mode, mess)
fit_info = {
'final_func_val': final_func_val,
'numiter': numiter,
'exit_mode': exit_mode,
'message': mess
}
else:
return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode)
fit_info = {
'final_func_val': final_func_val,
'numiter': numiter,
'exit_mode': exit_mode,
'num_function_calls': funcalls
}
with mk.patch.object(fitter._opt_method.__class__, 'opt_method',
return_value=return_value):
with pytest.warns(AstropyUserWarning, match=r"The fit may be unsuccessful; .*"):
assert (fitparams, fit_info) == fitter._opt_method(mk.MagicMock(), mk.MagicMock(),
mk.MagicMock(), xtol=xtol)
assert fit_info == fitter._opt_method.fit_info
# Check the resulting acc value for each optimizer
if isinstance(fitter, SLSQPLSQFitter):
assert fitter._opt_method.acc == 1e-16
else:
assert fitter._opt_method.acc == xtol
@mk.patch.multiple(Optimization, __abstractmethods__=set())
def test_Optimization_abstract_call():
optimization = Optimization(mk.MagicMock())
with pytest.raises(NotImplementedError) as err:
optimization()
assert str(err.value) == "Subclasses should implement this method"
def test_fitting_with_outlier_removal_niter():
"""
Test that FittingWithOutlierRemoval stops prior to reaching niter if the
set of masked points has converged and correctly reports the actual number
of iterations performed.
"""
# 2 rows with some noise around a constant level and 1 deviant point:
x = np.arange(25)
with NumpyRNGContext(_RANDOM_SEED):
y = np.random.normal(loc=10., scale=1., size=(2, 25))
y[0, 14] = 100.
# Fit 2 models with up to 5 iterations (should only take 2):
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=5,
sigma_lower=3., sigma_upper=3., maxiters=1
)
model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
# Confirm that only the deviant point was rejected, in 2 iterations:
assert_equal(np.where(mask), [[0], [14]])
assert fitter.fit_info['niter'] == 2
# Refit just the first row without any rejection iterations, to ensure
# there are no regressions for that special case:
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=0,
sigma_lower=3., sigma_upper=3., maxiters=1
)
model, mask = fitter(models.Chebyshev1D(2), x, y[0])
# Confirm that there were no iterations or rejected points:
assert mask.sum() == 0
assert fitter.fit_info['niter'] == 0
@pytest.mark.skipif('not HAS_SCIPY')
class TestFittingUncertainties:
"""
Test that parameter covariance is calculated correctly for the fitters
that do so (currently LevMarLSQFitter, LinearLSQFitter).
"""
example_1D_models = [models.Polynomial1D(2), models.Linear1D()]
example_1D_sets = [models.Polynomial1D(2, n_models=2, model_set_axis=False),
models.Linear1D(n_models=2, slope=[1., 1.], intercept=[0, 0])]
def setup_class(self):
np.random.seed(619)
self.x = np.arange(10)
self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.rand_grid = np.random.random(100).reshape(10, 10)
self.rand = self.rand_grid[0]
@pytest.mark.parametrize(('single_model', 'model_set'),
list(zip(example_1D_models, example_1D_sets)))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_1d_models(self, single_model, model_set, fitter):
""" Test that fitting uncertainties are computed correctly for 1D models
and 1D model sets. Use covariance/stds given by LevMarLSQFitter as
a benchmark since they are returned by the numpy fitter.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
# test 1D single models
# fit single model w/ nonlinear fitter
y = single_model(self.x) + self.rand
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_model = fitter(single_model, self.x, y)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x, y)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
# check that covariance and stds are computed correctly
assert_allclose(cov_model_linlsq, cov_model)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
fit_model_linlsq.stds.stds)
# now test 1D model sets
# fit set of models w/ linear fitter
y = model_set(self.x, model_set_axis=False) + np.array([self.rand, self.rand])
fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)
cov_1d_set_linlsq = [j.cov_matrix for j in
fit_1d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_1d_set_linlsq[0], cov_model)
assert_allclose(np.sqrt(np.diag(cov_1d_set_linlsq[0])),
fit_1d_set_linlsq.stds[0].stds)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_2d_models(self, fitter):
"""
Test that fitting uncertainties are computed correctly for 2D models
and 2D model sets. Use covariance/stds given by LevMarLSQFitter as
a benchmark since they are returned by the numpy fitter.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
single_model = models.Polynomial2D(2, c0_0=2)
model_set = models.Polynomial2D(degree=2, n_models=2, c0_0=[2, 3],
model_set_axis=False)
# fit single model w/ nonlinear fitter
z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_model = fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x_grid,
self.y_grid, z_grid)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
assert_allclose(cov_model, cov_model_linlsq)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
fit_model_linlsq.stds.stds)
# fit 2d model set
z_grid = model_set(self.x_grid, self.y_grid) + np.array((self.rand_grid,
self.rand_grid))
fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid,
z_grid)
cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_2d_set_linlsq[0], cov_model)
assert_allclose(np.sqrt(np.diag(cov_2d_set_linlsq[0])),
fit_2d_set_linlsq.stds[0].stds)
def test_covariance_std_printing_indexing(self, capsys):
"""
Test printing methods and indexing.
"""
# test str representation for Covariance/stds
fitter = LinearLSQFitter(calc_uncertainties=True)
mod = models.Linear1D()
fit_mod = fitter(mod, self.x, mod(self.x)+self.rand)
print(fit_mod.cov_matrix)
captured = capsys.readouterr()
assert "slope | 0.001" in captured.out
assert "intercept| -0.005, 0.03" in captured.out
print(fit_mod.stds)
captured = capsys.readouterr()
assert "slope | 0.032" in captured.out
assert "intercept| 0.173" in captured.out
# test 'pprint' for Covariance/stds
print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))
captured = capsys.readouterr()
assert "slope | 0.00105" in captured.out
assert "intercept" not in captured.out
print(fit_mod.stds.pprint(max_lines=1, round_val=5))
captured = capsys.readouterr()
assert "slope | 0.03241" in captured.out
assert "intercept" not in captured.out
# test indexing for Covariance class.
assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix['slope', 'slope']
# test indexing for stds class.
assert fit_mod.stds[1] == fit_mod.stds['intercept']
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_non_finite_error(fitter):
"""Regression test error introduced to solve issues #3575 and #12809"""
x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])
m_init = models.Gaussian1D()
fit = fitter()
# The fit raises an error because of the non-finite values
with pytest.raises(NonFiniteValueError, match=r"Objective function has encountered.*"):
fit(m_init, x, y)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_non_finite_filter_1D(fitter):
"""Regression test filter introduced to remove non-finte values from data"""
x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])
m_init = models.Gaussian1D()
fit = fitter()
with pytest.warns(AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter"):
fit(m_init, x, y, filter_non_finite=True)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_non_finite_filter_2D(fitter):
"""Regression test filter introduced to remove non-finte values from data"""
x, y = np.mgrid[0:10, 0:10]
m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2)
with NumpyRNGContext(_RANDOM_SEED):
z = m_true(x, y) + np.random.rand(*x.shape)
z[0, 0] = np.nan
z[3, 3] = np.inf
z[7, 5] = -np.inf
m_init = models.Gaussian2D()
fit = fitter()
with pytest.warns(AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter"):
fit(m_init, x, y, z, filter_non_finite=True)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import unittest.mock as mk
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import SpectralCoord
from astropy.modeling.bounding_box import (
CompoundBoundingBox, ModelBoundingBox, _BaseInterval, _BaseSelectorArgument, _BoundingDomain,
_ignored_interval, _Interval, _SelectorArgument, _SelectorArguments)
from astropy.modeling.core import Model, fix_inputs
from astropy.modeling.models import Gaussian1D, Gaussian2D, Identity, Polynomial2D, Scale, Shift
class Test_Interval:
def test_create(self):
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
assert isinstance(interval, _BaseInterval)
assert interval.lower == lower
assert interval.upper == upper
assert interval == (lower, upper)
assert interval.__repr__() == f"Interval(lower={lower}, upper={upper})"
def test_copy(self):
interval = _Interval(0.5, 1.5)
copy = interval.copy()
assert interval == copy
assert id(interval) != id(copy)
# Same float values will have the same id
assert interval.lower == copy.lower
assert id(interval.lower) == id(copy.lower)
# Same float values will have the same id
assert interval.upper == copy.upper
assert id(interval.upper) == id(copy.upper)
def test__validate_shape(self):
message = "An interval must be some sort of sequence of length 2"
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
# Passes (2,)
interval._validate_shape((1, 2))
interval._validate_shape([1, 2])
interval._validate_shape((1*u.m, 2*u.m))
interval._validate_shape([1*u.m, 2*u.m])
# Passes (1, 2)
interval._validate_shape(((1, 2),))
interval._validate_shape(([1, 2],))
interval._validate_shape([(1, 2)])
interval._validate_shape([[1, 2]])
interval._validate_shape(((1*u.m, 2*u.m),))
interval._validate_shape(([1*u.m, 2*u.m],))
interval._validate_shape([(1*u.m, 2*u.m)])
interval._validate_shape([[1*u.m, 2*u.m]])
# Passes (2,) with arbitrary objects
interval._validate_shape((mk.MagicMock(), mk.MagicMock()))
interval._validate_shape([mk.MagicMock(), mk.MagicMock()])
# Passes with array inputs:
interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
interval._validate_shape((np.array([-2.5, -3.5, -4.5]),
np.array([2.5, 3.5, 4.5])))
# Fails shape (no units)
with pytest.raises(ValueError) as err:
interval._validate_shape((1, 2, 3))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([[1, 2, 3], [4, 5, 6]])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape(1)
assert str(err.value) == message
# Fails shape (units)
message = "An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError) as err:
interval._validate_shape((1*u.m, 2*u.m, 3*u.m))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([1*u.m, 2*u.m, 3*u.m])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([[1*u.m, 2*u.m, 3*u.m], [4*u.m, 5*u.m, 6*u.m]])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape(1*u.m)
assert str(err.value) == message
# Fails shape (arrays):
with pytest.raises(ValueError) as err:
interval._validate_shape((np.array([-2.5, -3.5]),
np.array([2.5, 3.5]),
np.array([3, 4])))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5]))
assert str(err.value) == message
def test__validate_bounds(self):
# Passes
assert _Interval._validate_bounds(1, 2) == (1, 2)
assert _Interval._validate_bounds(1*u.m, 2*u.m) == (1*u.m, 2*u.m)
interval = _Interval._validate_bounds(np.array([-2.5, -3.5]), np.array([2.5, 3.5]))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
# Fails
with pytest.warns(RuntimeWarning,
match="Invalid interval: upper bound 1 is strictly "
r"less than lower bound 2\."):
_Interval._validate_bounds(2, 1)
with pytest.warns(RuntimeWarning,
match=r"Invalid interval: upper bound 1\.0 m is strictly "
r"less than lower bound 2\.0 m\."):
_Interval._validate_bounds(2*u.m, 1*u.m)
def test_validate(self):
# Passes
assert _Interval.validate((1, 2)) == (1, 2)
assert _Interval.validate([1, 2]) == (1, 2)
assert _Interval.validate((1*u.m, 2*u.m)) == (1*u.m, 2*u.m)
assert _Interval.validate([1*u.m, 2*u.m]) == (1*u.m, 2*u.m)
assert _Interval.validate(((1, 2),)) == (1, 2)
assert _Interval.validate(([1, 2],)) == (1, 2)
assert _Interval.validate([(1, 2)]) == (1, 2)
assert _Interval.validate([[1, 2]]) == (1, 2)
assert _Interval.validate(((1*u.m, 2*u.m),)) == (1*u.m, 2*u.m)
assert _Interval.validate(([1*u.m, 2*u.m],)) == (1*u.m, 2*u.m)
assert _Interval.validate([(1*u.m, 2*u.m)]) == (1*u.m, 2*u.m)
assert _Interval.validate([[1*u.m, 2*u.m]]) == (1*u.m, 2*u.m)
interval = _Interval.validate((np.array([-2.5, -3.5]),
np.array([2.5, 3.5])))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
interval = _Interval.validate((np.array([-2.5, -3.5, -4.5]),
np.array([2.5, 3.5, 4.5])))
assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all()
assert (interval.upper == np.array([2.5, 3.5, 4.5])).all()
# Fail shape
with pytest.raises(ValueError):
_Interval.validate((1, 2, 3))
# Fail bounds
with pytest.warns(RuntimeWarning):
_Interval.validate((2, 1))
def test_outside(self):
interval = _Interval.validate((0, 1))
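# np.linspace(-1, 2, 13) samples every 0.25; values below 0 or above 1
# fall outside the (0, 1) interval.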
assert (interval.outside(np.linspace(-1, 2, 13)) ==
[True, True, True, True,
False, False, False, False, False,
True, True, True, True]).all()
def test_domain(self):
interval = _Interval.validate((0, 1))
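# domain(0.25) samples the interval on a regular grid with the given
# resolution: 0, 0.25, 0.5, 0.75, 1.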
assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()
def test__ignored_interval(self):
assert _ignored_interval.lower == -np.inf
assert _ignored_interval.upper == np.inf
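# Any finite value, however large, lies strictly inside the ignored
# interval (-inf, inf), so nothing is ever clipped by it.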
for num in [0, -1, -100, 3.14, 10**100, -10**100]:
assert not num < _ignored_interval[0]
assert num > _ignored_interval[0]
assert not num > _ignored_interval[1]
assert num < _ignored_interval[1]
assert not (_ignored_interval.outside(np.array([num]))).all()
def test_validate_with_SpectralCoord(self):
"""Regression test for issue #12439"""
lower = SpectralCoord(1, u.um)
upper = SpectralCoord(10, u.um)
interval = _Interval.validate((lower, upper))
assert interval.lower == lower
assert interval.upper == upper
class Test_BoundingDomain:
def setup(self):
class BoundingDomain(_BoundingDomain):
def fix_inputs(self, model, fix_inputs):
super().fix_inputs(model, fixed_inputs=fix_inputs)
def prepare_inputs(self, input_shape, inputs):
super().prepare_inputs(input_shape, inputs)
self.BoundingDomain = BoundingDomain
def test_create(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'C'
bounding_box = self.BoundingDomain(model, order='F')
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'F'
bounding_box = self.BoundingDomain(Gaussian2D(), ['x'])
assert bounding_box._ignored == [0]
assert bounding_box._order == 'C'
# Error
with pytest.raises(ValueError):
self.BoundingDomain(model, order=mk.MagicMock())
def test_model(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box.model == model
def test_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock(), order='C')
assert bounding_box._order == 'C'
assert bounding_box.order == 'C'
bounding_box = self.BoundingDomain(mk.MagicMock(), order='F')
assert bounding_box._order == 'F'
assert bounding_box.order == 'F'
bounding_box._order = 'test'
assert bounding_box.order == 'test'
def test_ignored(self):
ignored = [0]
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = self.BoundingDomain(model, ignored=ignored)
assert bounding_box._ignored == ignored
assert bounding_box.ignored == ignored
def test__get_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Success (default 'C')
assert bounding_box._order == 'C'
assert bounding_box._get_order() == 'C'
assert bounding_box._get_order('C') == 'C'
assert bounding_box._get_order('F') == 'F'
# Success (default 'F')
bounding_box._order = 'F'
assert bounding_box._order == 'F'
assert bounding_box._get_order() == 'F'
assert bounding_box._get_order('C') == 'C'
assert bounding_box._get_order('F') == 'F'
# Error
order = mk.MagicMock()
with pytest.raises(ValueError) as err:
bounding_box._get_order(order)
assert str(err.value) == ("order must be either 'C' (C/python order) or "
f"'F' (Fortran/mathematical order), got: {order}.")
def test__get_index(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass input name
assert bounding_box._get_index('x') == 0
assert bounding_box._get_index('y') == 1
# Pass invalid input name
with pytest.raises(ValueError) as err:
bounding_box._get_index('z')
assert str(err.value) == "'z' is not one of the inputs: ('x', 'y')."
# Pass valid index
assert bounding_box._get_index(0) == 0
assert bounding_box._get_index(1) == 1
assert bounding_box._get_index(np.int32(0)) == 0
assert bounding_box._get_index(np.int32(1)) == 1
assert bounding_box._get_index(np.int64(0)) == 0
assert bounding_box._get_index(np.int64(1)) == 1
# Pass invalid index
MESSAGE = "Integer key: 2 must be non-negative and < 2."
with pytest.raises(IndexError) as err:
bounding_box._get_index(2)
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(np.int32(2))
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(np.int64(2))
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(-1)
assert str(err.value) == "Integer key: -1 must be non-negative and < 2."
# Pass invalid key
value = mk.MagicMock()
with pytest.raises(ValueError) as err:
bounding_box._get_index(value)
assert str(err.value) == f"Key value: {value} must be string or integer."
def test__get_name(self):
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = self.BoundingDomain(model)
index = mk.MagicMock()
name = mk.MagicMock()
model.inputs = mk.MagicMock()
model.inputs.__getitem__.return_value = name
assert bounding_box._get_name(index) == name
assert model.inputs.__getitem__.call_args_list == [mk.call(index)]
def test_ignored_inputs(self):
model = mk.MagicMock()
ignored = list(range(4, 8))
model.n_inputs = 8
model.inputs = [mk.MagicMock() for _ in range(8)]
bounding_box = self.BoundingDomain(model, ignored=ignored)
inputs = bounding_box.ignored_inputs
assert isinstance(inputs, list)
for index, _input in enumerate(inputs):
assert _input in model.inputs
assert model.inputs[index + 4] == _input
for index, _input in enumerate(model.inputs):
if _input in inputs:
assert inputs[index - 4] == _input
else:
assert index < 4
def test__validate_ignored(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass
assert bounding_box._validate_ignored(None) == []
assert bounding_box._validate_ignored(['x', 'y']) == [0, 1]
assert bounding_box._validate_ignored([0, 1]) == [0, 1]
assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]
# Fail
with pytest.raises(ValueError):
bounding_box._validate_ignored([mk.MagicMock()])
with pytest.raises(ValueError):
bounding_box._validate_ignored(['z'])
with pytest.raises(IndexError):
bounding_box._validate_ignored([3])
with pytest.raises(IndexError):
bounding_box._validate_ignored([np.int32(3)])
with pytest.raises(IndexError):
bounding_box._validate_ignored([np.int64(3)])
def test___call__(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
with pytest.raises(RuntimeError) as err:
bounding_box(*args, **kwargs)
assert str(err.value) == ("This bounding box is fixed by the model and does not have "
"adjustable parameters.")
def test_fix_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
model = mk.MagicMock()
fixed_inputs = mk.MagicMock()
with pytest.raises(NotImplementedError) as err:
bounding_box.fix_inputs(model, fixed_inputs)
assert str(err.value) == "This should be implemented by a child class."
def test__prepare_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
with pytest.raises(NotImplementedError) as err:
bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "This has not been implemented for BoundingDomain."
def test__base_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Simple shape
input_shape = (13,)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
# Complex shape
input_shape = (13, 7)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
def test__all_out_output(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
# Simple shape
model.n_outputs = 1
input_shape = (13,)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (1, 13)
assert output_unit is None
# Complex shape
model.n_outputs = 6
input_shape = (13, 7)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (6, 13, 7)
assert output_unit is None
def test__modify_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
# Simple shape
with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,
return_value=np.asanyarray(0)) as mkBase:
assert (np.array([1, 2, 3]) ==
bounding_box._modify_output([1, 2, 3], valid_index,
input_shape, fill_value)).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
# Replacement
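# The valid outputs [7, 8, 9] are scattered into the base array at the
# valid indices [0, 2, 4]; the remaining slots keep the base values.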
with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,
return_value=np.array([1, 2, 3, 4, 5, 6])) as mkBase:
assert (np.array([7, 2, 8, 4, 9, 6]) ==
bounding_box._modify_output([7, 8, 9], np.array([[0, 2, 4]]),
input_shape, fill_value)).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
def test__prepare_outputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
valid_outputs = [mk.MagicMock() for _ in range(3)]
effects = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(_BoundingDomain, '_modify_output', autospec=True,
side_effect=effects) as mkModify:
assert effects == bounding_box._prepare_outputs(valid_outputs, valid_index,
input_shape, fill_value)
assert mkModify.call_args_list == [
mk.call(bounding_box, valid_outputs[idx], valid_index, input_shape, fill_value)
for idx in range(3)
]
def test_prepare_outputs(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
valid_outputs = mk.MagicMock()
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with mk.patch.object(_BoundingDomain, '_prepare_outputs', autospec=True) as mkPrepare:
# Reshape valid_outputs
model.n_outputs = 1
assert mkPrepare.return_value == bounding_box.prepare_outputs(valid_outputs,
valid_index,
input_shape,
fill_value)
assert mkPrepare.call_args_list == [
mk.call(bounding_box, [valid_outputs], valid_index, input_shape, fill_value)
]
mkPrepare.reset_mock()
# No reshape valid_outputs
model.n_outputs = 2
assert mkPrepare.return_value == bounding_box.prepare_outputs(valid_outputs,
valid_index,
input_shape,
fill_value)
assert mkPrepare.call_args_list == [
mk.call(bounding_box, valid_outputs, valid_index, input_shape, fill_value)
]
def test__get_valid_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Don't get unit
assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None
# Get unit from unitless
assert bounding_box._get_valid_outputs_unit(7, True) is None
# Get unit
assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m
def test__evaluate_model(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
valid_inputs = mk.MagicMock()
input_shape = mk.MagicMock()
valid_index = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
with mk.patch.object(_BoundingDomain, '_get_valid_outputs_unit',
autospec=True) as mkGet:
with mk.patch.object(_BoundingDomain, 'prepare_outputs',
autospec=True) as mkPrepare:
assert bounding_box._evaluate_model(evaluate, valid_inputs,
valid_index, input_shape,
fill_value, with_units) == (
mkPrepare.return_value,
mkGet.return_value
)
assert mkPrepare.call_args_list == [mk.call(bounding_box, evaluate.return_value,
valid_index, input_shape, fill_value)]
assert mkGet.call_args_list == [mk.call(evaluate.return_value, with_units)]
assert evaluate.call_args_list == [mk.call(valid_inputs)]
def test__evaluate(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
valid_inputs = mk.MagicMock()
valid_index = mk.MagicMock()
effects = [(valid_inputs, valid_index, True), (valid_inputs, valid_index, False)]
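# The first mocked prepare_inputs call reports all_out=True (every input
# outside the box); the second reports a normal evaluation.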
with mk.patch.object(self.BoundingDomain, 'prepare_inputs', autospec=True,
side_effect=effects) as mkPrepare:
with mk.patch.object(_BoundingDomain, '_all_out_output',
autospec=True) as mkAll:
with mk.patch.object(_BoundingDomain, '_evaluate_model',
autospec=True) as mkEvaluate:
# all_out
assert bounding_box._evaluate(evaluate, inputs, input_shape,
fill_value, with_units) == mkAll.return_value
assert mkAll.call_args_list == [mk.call(bounding_box, input_shape, fill_value)]
assert mkEvaluate.call_args_list == []
assert mkPrepare.call_args_list == [mk.call(bounding_box, input_shape, inputs)]
mkAll.reset_mock()
mkPrepare.reset_mock()
# not all_out
assert bounding_box._evaluate(evaluate, inputs, input_shape,
fill_value, with_units) == mkEvaluate.return_value
assert mkAll.call_args_list == []
assert mkEvaluate.call_args_list == [mk.call(bounding_box, evaluate,
valid_inputs, valid_index,
input_shape, fill_value,
with_units)]
assert mkPrepare.call_args_list == [mk.call(bounding_box, input_shape, inputs)]
def test__set_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# set no unit
assert 27 == bounding_box._set_outputs_unit(27, None)
# set unit
assert 27 * u.m == bounding_box._set_outputs_unit(27, u.m)
def test_evaluate(self):
bounding_box = self.BoundingDomain(Gaussian2D())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
fill_value = mk.MagicMock()
outputs = mk.MagicMock()
valid_outputs_unit = mk.MagicMock()
value = (outputs, valid_outputs_unit)
with mk.patch.object(_BoundingDomain, '_evaluate',
autospec=True, return_value=value) as mkEvaluate:
with mk.patch.object(_BoundingDomain, '_set_outputs_unit',
autospec=True) as mkSet:
with mk.patch.object(Model, 'input_shape', autospec=True) as mkShape:
with mk.patch.object(Model, 'bbox_with_units',
new_callable=mk.PropertyMock) as mkUnits:
assert tuple(mkSet.return_value) == bounding_box.evaluate(evaluate, inputs,
fill_value)
assert mkSet.call_args_list == [mk.call(outputs, valid_outputs_unit)]
assert mkEvaluate.call_args_list == [mk.call(bounding_box, evaluate, inputs,
mkShape.return_value,
fill_value,
mkUnits.return_value)]
assert mkShape.call_args_list == [mk.call(bounding_box._model, inputs)]
assert mkUnits.call_args_list == [mk.call()]
class TestModelBoundingBox:
def test_create(self):
intervals = ()
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'C'
# Set optional
intervals = {}
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model, order='F')
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'F'
# Set interval
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
# Set ignored
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 2
model.inputs = ['x', 'y']
bounding_box = ModelBoundingBox(intervals, model, ignored=[1])
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
assert bounding_box._ignored == [1]
intervals = ((1, 2), (3, 4))
model = mk.MagicMock()
model.n_inputs = 3
model.inputs = ['x', 'y', 'z']
bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order='F')
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)}
assert bounding_box._model == model
assert bounding_box._ignored == [2]
assert bounding_box._order == 'F'
def test_copy(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4)))
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
assert bounding_box.ignored == copy.ignored
assert id(bounding_box.ignored) != id(copy.ignored)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
# Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
# Check interval objects
for index, interval in bounding_box.intervals.items():
assert interval == copy.intervals[index]
assert id(interval) != id(copy.intervals[index])
# Same float values will have the same id
assert interval.lower == copy.intervals[index].lower
assert id(interval.lower) == id(copy.intervals[index].lower)
# Same float values will have the same id
assert interval.upper == copy.intervals[index].upper
assert id(interval.upper) == id(copy.intervals[index].upper)
assert len(bounding_box.intervals) == len(copy.intervals)
assert bounding_box.intervals.keys() == copy.intervals.keys()
def test_intervals(self):
intervals = {0: _Interval(1, 2)}
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = ModelBoundingBox(intervals, model)
assert bounding_box._intervals == intervals
assert bounding_box.intervals == intervals
def test_named_intervals(self):
intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)}
model = mk.MagicMock()
model.n_inputs = 4
model.inputs = [mk.MagicMock() for _ in range(4)]
bounding_box = ModelBoundingBox(intervals, model)
named = bounding_box.named_intervals
assert isinstance(named, dict)
for name, interval in named.items():
assert name in model.inputs
assert intervals[model.inputs.index(name)] == interval
for index, name in enumerate(model.inputs):
assert index in intervals
assert name in named
assert intervals[index] == named[name]
def test___repr__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.__repr__() == (
"ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" y: Interval(lower=-4, upper=4)\n"
" }\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
intervals = {0: _Interval(-1, 1)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])
assert bounding_box.__repr__() == (
"ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['y']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
def test___len__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert len(bounding_box) == 0 == len(bounding_box._intervals)
def test___contains__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Contains with keys
assert 'x' in bounding_box
assert 'y' in bounding_box
assert 'z' not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
# General not in
assert mk.MagicMock() not in bounding_box
# Contains with ignored
del bounding_box['y']
# Contains with keys
assert 'x' in bounding_box
assert 'y' in bounding_box
assert 'z' not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
def test___getitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Get using input key
assert bounding_box['x'] == (-1, 1)
assert bounding_box['y'] == (-4, 4)
# Fail with input key
with pytest.raises(ValueError):
bounding_box['z']
# Get using index
assert bounding_box[0] == (-1, 1)
assert bounding_box[1] == (-4, 4)
assert bounding_box[np.int32(0)] == (-1, 1)
assert bounding_box[np.int32(1)] == (-4, 4)
assert bounding_box[np.int64(0)] == (-1, 1)
assert bounding_box[np.int64(1)] == (-4, 4)
# Fail with index
with pytest.raises(IndexError):
bounding_box[2]
with pytest.raises(IndexError):
bounding_box[np.int32(2)]
with pytest.raises(IndexError):
bounding_box[np.int64(2)]
# get ignored interval
del bounding_box[0]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == (-4, 4)
del bounding_box[1]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == _ignored_interval
def test_bounding_box(self):
# 0D
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x'])
assert bounding_box.bounding_box() == (-np.inf, np.inf)
assert bounding_box.bounding_box('C') == (-np.inf, np.inf)
assert bounding_box.bounding_box('F') == (-np.inf, np.inf)
# 1D
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == (-1, 1)
assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1)
# > 1D
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
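# 'C' (python) order lists the last input's interval first, while
# 'F' (mathematical) order follows the input order.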
assert bounding_box.bounding_box() == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box('C') == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box('F') == ((-1, 1), (-4, 4))
def test___eq__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == bounding_box
assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == (-1, 1)
assert not (bounding_box == mk.MagicMock())
assert not (bounding_box == (-2, 2))
assert not (bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)}))
# Respect ordering
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box_1 = ModelBoundingBox.validate(model, intervals)
bounding_box_2 = ModelBoundingBox.validate(model, intervals, order='F')
assert bounding_box_1._order == 'C'
assert bounding_box_1 == ((-4, 4), (-1, 1))
assert not (bounding_box_1 == ((-1, 1), (-4, 4)))
assert bounding_box_2._order == 'F'
assert not (bounding_box_2 == ((-4, 4), (-1, 1)))
assert bounding_box_2 == ((-1, 1), (-4, 4))
assert bounding_box_1 == bounding_box_2
# Respect ignored
model = Gaussian2D()
bounding_box_1._ignored = [mk.MagicMock()]
bounding_box_2._ignored = [mk.MagicMock()]
assert bounding_box_1._ignored != bounding_box_2._ignored
assert not (bounding_box_1 == bounding_box_2)
def test__setitem__(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1])
assert bounding_box._ignored == [0, 1]
# USING Intervals directly
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box['x'] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box['x'], _Interval)
assert bounding_box['x'] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box['y'] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box['y'], _Interval)
assert bounding_box['y'] == (-4, 4)
del bounding_box['x']
del bounding_box['y']
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
del bounding_box[0]
del bounding_box[1]
# USING tuples
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box['x'] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box['x'], _Interval)
assert bounding_box['x'] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box['y'] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box['y'], _Interval)
assert bounding_box['y'] == (-4, 4)
del bounding_box['x']
del bounding_box['y']
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# USING Intervals directly
# Set interval using key
assert 'x' not in bounding_box
bounding_box['x'] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 'x' in bounding_box
assert isinstance(bounding_box['x'], _Interval)
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
# USING tuples
# Set interval using key
bounding_box._intervals = {}
assert 'x' not in bounding_box
bounding_box['x'] = (np.array([-1, -2]), np.array([1, 2]))
assert 'x' in bounding_box
assert isinstance(bounding_box['x'], _Interval)
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = (np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test___delitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Using index
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert 0 in bounding_box
assert 'x' in bounding_box
del bounding_box[0]
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
assert 0 in bounding_box
assert 'x' in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError) as err:
del bounding_box[0]
assert str(err.value) == "Cannot delete ignored input: 0!"
# Using key
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert 0 in bounding_box
assert 'y' in bounding_box
del bounding_box['y']
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
assert 0 in bounding_box
assert 'y' in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError) as err:
del bounding_box['y']
assert str(err.value) == "Cannot delete ignored input: y!"
def test__validate_dict(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Input name keys
intervals = {'x': _Interval(-1, 1), 'y': _Interval(-4, 4)}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_dict(intervals)
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Input index
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# name keys
intervals = {'x': _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 'x' not in bounding_box
bounding_box._validate_dict(intervals)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# input index
bounding_box._intervals = {}
intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 0 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test__validate_sequence(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# C order
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='C')
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Fortran order
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Invalid order
bounding_box._intervals = {}
order = mk.MagicMock()
assert 'x' not in bounding_box
assert 'y' not in bounding_box
with pytest.raises(ValueError):
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=order)
assert 'x' not in bounding_box
assert 'y' not in bounding_box
assert len(bounding_box.intervals) == 0
def test__n_inputs(self):
model = Gaussian2D()
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box._n_inputs == 2
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])
assert bounding_box._n_inputs == 1
bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x', 'y'])
assert bounding_box._n_inputs == 0
bounding_box._ignored = ['x', 'y', 'z']
assert bounding_box._n_inputs == 0
def test__validate_iterable(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass with ignored
bounding_box._intervals = {}
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1)}
assert 0 not in bounding_box.intervals
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
# Invalid iterable
bounding_box._intervals = {}
bounding_box._ignored = []
assert 'x' not in bounding_box
assert 'y' not in bounding_box
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3)))
assert str(err.value) == "Found 3 intervals, but must have exactly 2."
assert len(bounding_box.intervals) == 0
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(intervals)
assert str(err.value) == "Found 2 intervals, but must have exactly 1."
assert len(bounding_box.intervals) == 0
bounding_box._ignored = []
intervals = {0: _Interval(-1, 1)}
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(intervals)
assert str(err.value) == "Found 1 intervals, but must have exactly 2."
assert 'x' not in bounding_box
assert 'y' not in bounding_box
assert len(bounding_box.intervals) == 0
def test__validate(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass single with ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox({}, model, ignored=[1])
assert 0 not in bounding_box.intervals
assert 1 not in bounding_box.intervals
bounding_box._validate(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert len(bounding_box.intervals) == 1
# Pass single
model = Gaussian1D()
bounding_box = ModelBoundingBox({}, model)
assert 'x' not in bounding_box
bounding_box._validate((-1, 1))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
sequence = (np.array([-1, -2]), np.array([1, 2]))
assert 'x' not in bounding_box
bounding_box._validate(sequence)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
def test_validate(self):
model = Gaussian2D()
kwargs = {'test': mk.MagicMock()}
        # Pass sequence, default order
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
        # Pass sequence, Fortran order
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), order='F', **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals, order='F', **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == 'F'
# Pass ModelBoundingBox
bbox = bounding_box
bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == 'F'
        # Pass single with ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'], **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == _ignored_interval
assert len(bounding_box.intervals) == 1
# Pass single
bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs)
assert (bounding_box._model.parameters == Gaussian1D().parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
sequence = (np.array([-1, -2]), np.array([1, 2]))
bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
def test_fix_inputs(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
        # _keep_ignored = False (default)
new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()})
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all()
assert 'x' in new_bounding_box
assert new_bounding_box['x'] == (-1, 1)
assert 'y' not in new_bounding_box
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == []
        # _keep_ignored = True
new_bounding_box = bounding_box.fix_inputs(Gaussian2D(), {1: mk.MagicMock()},
_keep_ignored=True)
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all()
assert 'x' in new_bounding_box
assert new_bounding_box['x'] == (-1, 1)
assert 'y' in new_bounding_box
assert 'y' in new_bounding_box.ignored_inputs
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == [1]
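    # Illustrative sketch (comments only, not executed) of the `fix_inputs`
    # behavior asserted above; `value` stands in for any fixed input value:
    #
    #     bbox = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
    #     new = bbox.fix_inputs(Gaussian1D(), {1: value})  # remove input 'y'
    #     new['x']  # -> (-1, 1); 'y' is dropped entirely
    #
    # With the private `_keep_ignored=True` flag the fixed input is instead
    # kept on the model but marked as ignored.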
def test_dimension(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert bounding_box.dimension == 0 == len(bounding_box._intervals)
def test_domain(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# test defaults
assert (np.array(bounding_box.domain(0.25)) ==
np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()
# test C order
assert (np.array(bounding_box.domain(0.25, 'C')) ==
np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()
# test Fortran order
assert (np.array(bounding_box.domain(0.25, 'F')) ==
np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)])).all()
# test error order
order = mk.MagicMock()
with pytest.raises(ValueError):
bounding_box.domain(0.25, order)
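    # Illustrative sketch (comments only) of the `domain` behavior asserted
    # above. With x: (-1, 1), y: (0, 2) and resolution 0.25 (9 samples per
    # interval), 'C' order (the default) lists the last input's grid first,
    # while 'F' order follows the input order:
    #
    #     bounding_box.domain(0.25)       # [linspace(0, 2, 9), linspace(-1, 1, 9)]
    #     bounding_box.domain(0.25, 'F')  # [linspace(-1, 1, 9), linspace(0, 2, 9)]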
def test__outside(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False for _ in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index ==
[True, True, True, True,
False, False, False, False, False,
True, True, True, True]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True for _ in range(13)]).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True]).all()
assert all_out and isinstance(all_out, bool)
def test__valid_index(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
def test_prepare_inputs(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) == np.array(inputs)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) ==
np.array(
[
[x[4], x[5], x[6], x[7], x[8]],
[y[4], y[5], y[6], y[7], y[8]],
]
)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all()
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
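    # Sketch (comments only) of the `prepare_inputs` contract exercised above:
    #
    #     new_inputs, valid_index, all_out = bbox.prepare_inputs(shape, inputs)
    #
    # `new_inputs` keeps only the in-bounds entries (an empty tuple when
    # nothing is inside), `valid_index` indexes those entries, and `all_out`
    # is a plain bool flagging the all-outside case.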
def test_bounding_box_ignore(self):
"""Regression test for #13028"""
bbox_x = ModelBoundingBox((9, 10), Polynomial2D(1), ignored=["x"])
assert bbox_x.ignored_inputs == ['x']
bbox_y = ModelBoundingBox((11, 12), Polynomial2D(1), ignored=["y"])
assert bbox_y.ignored_inputs == ['y']
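# A minimal usage sketch (comments only) of the public ModelBoundingBox API
# covered by the class above, using only names already imported in this module:
#
#     bbox = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
#     bbox['x'], bbox['y']  # (-1, 1), (-4, 4): sequences are read in 'C' order
#     'x' in bbox           # True
#     bbox.domain(0.25)     # sample grids spanning each interval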
class Test_SelectorArgument:
def test_create(self):
index = mk.MagicMock()
ignore = mk.MagicMock()
argument = _SelectorArgument(index, ignore)
assert isinstance(argument, _BaseSelectorArgument)
assert argument.index == index
assert argument.ignore == ignore
assert argument == (index, ignore)
def test_validate(self):
model = Gaussian2D()
# default integer
assert _SelectorArgument.validate(model, 0) == (0, True)
assert _SelectorArgument.validate(model, 1) == (1, True)
# default string
assert _SelectorArgument.validate(model, 'x') == (0, True)
assert _SelectorArgument.validate(model, 'y') == (1, True)
ignore = mk.MagicMock()
# non-default integer
assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)
# non-default string
assert _SelectorArgument.validate(model, 'x', ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 'y', ignore) == (1, ignore)
# Fail
with pytest.raises(ValueError):
_SelectorArgument.validate(model, 'z')
with pytest.raises(ValueError):
_SelectorArgument.validate(model, mk.MagicMock())
with pytest.raises(IndexError):
_SelectorArgument.validate(model, 2)
def test_get_selector(self):
# single inputs
inputs = [idx + 17 for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
# numpy array of single inputs
inputs = [np.array([idx + 11]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
inputs = [np.asanyarray(idx + 13) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
# multi entry numpy array
inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index,
mk.MagicMock()).get_selector(*inputs) == tuple(inputs[index])
def test_name(self):
model = Gaussian2D()
for index in range(model.n_inputs):
assert _SelectorArgument(index, mk.MagicMock()).name(model) == model.inputs[index]
def test_pretty_repr(self):
model = Gaussian2D()
assert _SelectorArgument(0, False).pretty_repr(model) == "Argument(name='x', ignore=False)"
assert _SelectorArgument(0, True).pretty_repr(model) == "Argument(name='x', ignore=True)"
assert _SelectorArgument(1, False).pretty_repr(model) == "Argument(name='y', ignore=False)"
assert _SelectorArgument(1, True).pretty_repr(model) == "Argument(name='y', ignore=True)"
def test_get_fixed_value(self):
model = Gaussian2D()
values = {0: 5, 'y': 7}
# Get index value
assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5
# Get name value
assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7
# Fail
values = {0: 5}
with pytest.raises(RuntimeError) as err:
_SelectorArgument(1, True).get_fixed_value(model, values)
assert str(err.value) == "Argument(name='y', ignore=True) was not found in {0: 5}"
def test_is_argument(self):
model = Gaussian2D()
argument = _SelectorArgument.validate(model, 0)
# Is true
assert argument.is_argument(model, 0) is True
assert argument.is_argument(model, 'x') is True
# Is false
assert argument.is_argument(model, 1) is False
assert argument.is_argument(model, 'y') is False
# Fail
with pytest.raises(ValueError):
argument.is_argument(model, 'z')
with pytest.raises(ValueError):
argument.is_argument(model, mk.MagicMock())
with pytest.raises(IndexError):
argument.is_argument(model, 2)
def test_named_tuple(self):
model = Gaussian2D()
for index in range(model.n_inputs):
ignore = mk.MagicMock()
assert _SelectorArgument(index, ignore).named_tuple(model) == (model.inputs[index],
ignore)
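# Sketch (comments only) of the `_SelectorArgument.validate` normalization
# asserted above: an input named by position or by name normalizes to the
# positional form with an `ignore` flag that defaults to True:
#
#     _SelectorArgument.validate(Gaussian2D(), 'x')       # -> (0, True)
#     _SelectorArgument.validate(Gaussian2D(), 1, False)  # -> (1, False)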
class Test_SelectorArguments:
def test_create(self):
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == []
kept_ignore = mk.MagicMock()
arguments = _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, False)), kept_ignore)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == kept_ignore
def test_pretty_repr(self):
model = Gaussian2D()
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))
assert arguments.pretty_repr(model) == (
"SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" Argument(name='y', ignore=False)\n"
")"
)
def test_ignore(self):
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, True))).ignore == [0, 1]
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, True)), [13, 4]).ignore == [0, 1, 13, 4]
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, False))).ignore == [0]
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, True))).ignore == [1]
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, False))).ignore == []
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, False)), [17, 14]).ignore == [17, 14]
def test_validate(self):
# Integer key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Default ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), (1,)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, True))
assert arguments.kept_ignore == []
# String key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), (('x', True), ('y', False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Test kept_ignore option
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8])
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [11, 5, 8]
arguments._kept_ignore = [13, 17, 14]
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [13, 17, 14]
# Invalid, bad argument
with pytest.raises(ValueError):
_SelectorArguments.validate(Gaussian2D(), ((0, True), ('z', False)))
with pytest.raises(ValueError):
_SelectorArguments.validate(Gaussian2D(), ((mk.MagicMock(), True), (1, False)))
with pytest.raises(IndexError):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False)))
# Invalid, repeated argument
with pytest.raises(ValueError) as err:
_SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False)))
assert str(err.value) == "Input: 'x' has been repeated."
# Invalid, no arguments
with pytest.raises(ValueError) as err:
_SelectorArguments.validate(Gaussian2D(), ())
assert str(err.value) == "There must be at least one selector argument."
def test_get_selector(self):
inputs = [idx + 19 for idx in range(4)]
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),
(1, False))).get_selector(*inputs) == tuple(inputs[:2])
assert _SelectorArguments.validate(Gaussian2D(),
((1, True),
(0, False))).get_selector(*inputs) == tuple(inputs[:2][::-1]) # noqa: E501
assert _SelectorArguments.validate(Gaussian2D(),
((1, False),)).get_selector(*inputs) == (inputs[1],)
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),)).get_selector(*inputs) == (inputs[0],)
def test_is_selector(self):
# Is Selector
assert _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5, 2.5))
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector((0.5,))
# Is not selector
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5, 2.5, 3.5))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5,))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector(0.5)
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector((0.5, 2.5))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector(2.5)
def test_get_fixed_values(self):
model = Gaussian2D()
assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(
model, {0: 11, 1: 7}) == (11, 7)
assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(
model, {0: 5, 'y': 47}) == (5, 47)
assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(
model, {'x': 2, 'y': 9}) == (2, 9)
assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(
model, {'x': 12, 1: 19}) == (12, 19)
def test_is_argument(self):
model = Gaussian2D()
# Is true
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, 'x') is True
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, 'y') is True
# Is true and false
arguments = _SelectorArguments.validate(model, ((0, True),))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, 'x') is True
assert arguments.is_argument(model, 1) is False
assert arguments.is_argument(model, 'y') is False
arguments = _SelectorArguments.validate(model, ((1, False),))
assert arguments.is_argument(model, 0) is False
assert arguments.is_argument(model, 'x') is False
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, 'y') is True
def test_selector_index(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.selector_index(model, 0) == 0
assert arguments.selector_index(model, 'x') == 0
assert arguments.selector_index(model, 1) == 1
assert arguments.selector_index(model, 'y') == 1
arguments = _SelectorArguments.validate(model, ((1, True), (0, False)))
assert arguments.selector_index(model, 0) == 1
assert arguments.selector_index(model, 'x') == 1
assert arguments.selector_index(model, 1) == 0
assert arguments.selector_index(model, 'y') == 0
# Error
arguments = _SelectorArguments.validate(model, ((0, True),))
with pytest.raises(ValueError) as err:
arguments.selector_index(model, 'y')
assert str(err.value) == "y does not correspond to any selector argument."
def test_add_ignore(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), ))
assert arguments == ((0, True),)
assert arguments._kept_ignore == []
new_arguments0 = arguments.add_ignore(model, 1)
assert new_arguments0 == arguments
assert new_arguments0._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments1 = new_arguments0.add_ignore(model, 'y')
assert new_arguments1 == arguments == new_arguments0
assert new_arguments0._kept_ignore == [1]
assert new_arguments1._kept_ignore == [1, 1]
assert arguments._kept_ignore == []
# Error
with pytest.raises(ValueError) as err:
arguments.add_ignore(model, 0)
assert str(err.value) == "0: is a selector argument and cannot be ignored."
def test_reduce(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
new_arguments = arguments.reduce(model, 0)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 'x')
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 1)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 'y')
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
def test_named_tuple(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.named_tuple(model) == (('x', True), ('y', False))
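# Sketch (comments only) of how `_SelectorArguments` tracks fixed and ignored
# arguments, mirroring `test_reduce` and `test_add_ignore` above:
#
#     args = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))
#     args.reduce(Gaussian2D(), 'x')  # -> ((1, False),) with kept_ignore == [0]
#
#     single = _SelectorArguments.validate(Gaussian2D(), ((0, True),))
#     single.add_ignore(Gaussian2D(), 1)  # same tuple, kept_ignore == [1]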
class TestCompoundBoundingBox:
def test_create(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox(bounding_boxes, model,
selector_args, create_selector, order='F')
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'F'
def test_copy(self):
bounding_box = CompoundBoundingBox.validate(Gaussian2D(),
{(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)},
((0, True),), mk.MagicMock())
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
        # Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
assert bounding_box._create_selector == copy._create_selector
assert id(bounding_box._create_selector) != id(copy._create_selector)
# Check selector_args
for index, argument in enumerate(bounding_box.selector_args):
assert argument == copy.selector_args[index]
assert id(argument) != id(copy.selector_args[index])
            # Same integer values will have the same id
assert argument.index == copy.selector_args[index].index
assert id(argument.index) == id(copy.selector_args[index].index)
            # Same boolean values will have the same id
assert argument.ignore == copy.selector_args[index].ignore
assert id(argument.ignore) == id(copy.selector_args[index].ignore)
assert len(bounding_box.selector_args) == len(copy.selector_args)
# Check bounding_boxes
for selector, bbox in bounding_box.bounding_boxes.items():
assert bbox == copy.bounding_boxes[selector]
assert id(bbox) != id(copy.bounding_boxes[selector])
assert bbox.ignored == copy.bounding_boxes[selector].ignored
assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored)
# model is not copied to prevent infinite recursion
assert bbox._model == copy.bounding_boxes[selector]._model
assert id(bbox._model) == id(copy.bounding_boxes[selector]._model)
            # Same string values will have the same id
assert bbox._order == copy.bounding_boxes[selector]._order
assert id(bbox._order) == id(copy.bounding_boxes[selector]._order)
# Check interval objects
for index, interval in bbox.intervals.items():
assert interval == copy.bounding_boxes[selector].intervals[index]
assert id(interval) != id(copy.bounding_boxes[selector].intervals[index])
                # Same float values will have the same id
assert interval.lower == copy.bounding_boxes[selector].intervals[index].lower
assert id(interval.lower) == id(copy.bounding_boxes[selector].intervals[index].lower) # noqa: E501
                # Same float values will have the same id
assert interval.upper == copy.bounding_boxes[selector].intervals[index].upper
assert id(interval.upper) == id(copy.bounding_boxes[selector].intervals[index].upper) # noqa: E501
assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals)
assert bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys()
assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes)
assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys()
def test___repr__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box.__repr__() == (
"CompoundBoundingBox(\n"
" bounding_boxes={\n"
" (1,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" (2,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-2, upper=2)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" }\n"
" selector_args = SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" )\n"
")"
)
def test_bounding_boxes(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box.bounding_boxes == bounding_boxes
def test_selector_args(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_box = CompoundBoundingBox({}, model, selector_args)
# Get
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
# Set
selector_args = ((1, False),)
with pytest.warns(RuntimeWarning, match=r"Overriding selector_args.*"):
bounding_box.selector_args = selector_args
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
def test_create_selector(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector)
assert bounding_box._create_selector == create_selector
assert bounding_box.create_selector == create_selector
def test__get_selector_key(self):
bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),))
assert len(bounding_box.bounding_boxes) == 0
        # Single
assert bounding_box._get_selector_key(5) == (5,)
assert bounding_box._get_selector_key((5,)) == (5,)
assert bounding_box._get_selector_key([5]) == (5,)
assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,)
assert bounding_box._get_selector_key(np.array([5])) == (5,)
        # Multiple
assert bounding_box._get_selector_key((5, 19)) == (5, 19)
assert bounding_box._get_selector_key([5, 19]) == (5, 19)
assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19)
def test___setitem__(self):
model = Gaussian2D()
# Ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, True),), order='F')
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15, )] = (-15, 15)
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == (-15, 15)
assert bounding_box._bounding_boxes[(15,)].order == 'F'
# Invalid key
assert (7, 13) not in bounding_box._bounding_boxes
with pytest.raises(ValueError) as err:
bounding_box[(7, 13)] = (-7, 7)
assert str(err.value) == "(7, 13) is not a selector!"
assert (7, 13) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(ValueError):
bounding_box[(13,)] = ((-13, 13), (-3, 3))
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# No ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, False),), order='F')
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15, )] = ((-15, 15), (-6, 6))
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6))
assert bounding_box._bounding_boxes[(15,)].order == 'F'
# Invalid key
assert (14, 11) not in bounding_box._bounding_boxes
with pytest.raises(ValueError) as err:
bounding_box[(14, 11)] = ((-7, 7), (-12, 12))
assert str(err.value) == "(14, 11) is not a selector!"
assert (14, 11) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(ValueError):
bounding_box[(13,)] = (-13, 13)
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
def test__validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
# Tuple selector_args
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox({}, model, selector_args)
bounding_box._validate(bounding_boxes)
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
def test___eq__(self):
bounding_box_1 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)},
Gaussian2D(), ((0, True),))
bounding_box_2 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)},
Gaussian2D(), ((0, True),))
# Equal
assert bounding_box_1 == bounding_box_2
# Not equal to non-compound bounding_box
assert not bounding_box_1 == mk.MagicMock()
assert not bounding_box_2 == mk.MagicMock()
# Not equal bounding_boxes
bounding_box_2[(15,)] = (-15, 15)
assert not bounding_box_1 == bounding_box_2
del bounding_box_2._bounding_boxes[(15,)]
assert bounding_box_1 == bounding_box_2
# Not equal selector_args
bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, False),))
assert not bounding_box_1 == bounding_box_2
bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, True),))
assert bounding_box_1 == bounding_box_2
# Not equal create_selector
bounding_box_2._create_selector = mk.MagicMock()
assert not bounding_box_1 == bounding_box_2
def test_validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
# Fail selector_args
with pytest.raises(ValueError) as err:
CompoundBoundingBox.validate(model, bounding_boxes)
assert str(err.value) == ("Selector arguments must be provided "
"(can be passed as part of bounding_box argument)")
# Normal validate
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,
create_selector, order='F')
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'F'
# Re-validate
new_bounding_box = CompoundBoundingBox.validate(model, bounding_box)
assert bounding_box == new_bounding_box
assert new_bounding_box._order == 'F'
# Default order
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,
create_selector)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'C'
def test___contains__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (1,) in bounding_box
assert (2,) in bounding_box
assert (3,) not in bounding_box
assert 1 not in bounding_box
assert 2 not in bounding_box
def test__create_bounding_box(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1, False),),
create_selector)
# Create is successful
create_selector.return_value = ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 0
bbox = bounding_box._create_bounding_box((7,))
assert isinstance(bbox, ModelBoundingBox)
assert bbox == ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 1
assert (7,) in bounding_box
assert isinstance(bounding_box[(7,)], ModelBoundingBox)
assert bounding_box[(7,)] == bbox
# Create is unsuccessful
create_selector.return_value = (-42, 42)
with pytest.raises(ValueError):
bounding_box._create_bounding_box((27,))
def test___getitem__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
# already exists
assert isinstance(bounding_box[1], ModelBoundingBox)
assert bounding_box[1] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[2] == (-2, 2)
assert isinstance(bounding_box[(1,)], ModelBoundingBox)
assert bounding_box[(1,)] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[(2,)] == (-2, 2)
# no selector
with pytest.raises(RuntimeError) as err:
bounding_box[(3,)]
assert str(err.value) == "No bounding box is defined for selector: (3,)."
# Create a selector
bounding_box._create_selector = mk.MagicMock()
with mk.patch.object(CompoundBoundingBox, '_create_bounding_box',
autospec=True) as mkCreate:
assert bounding_box[(3,)] == mkCreate.return_value
assert mkCreate.call_args_list == [mk.call(bounding_box, (3,))]
def test__select_bounding_box(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
inputs = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(_SelectorArguments, 'get_selector',
autospec=True) as mkSelector:
with mk.patch.object(CompoundBoundingBox, '__getitem__',
autospec=True) as mkGet:
assert bounding_box._select_bounding_box(inputs) == mkGet.return_value
assert mkGet.call_args_list == [mk.call(bounding_box, mkSelector.return_value)]
assert mkSelector.call_args_list == [mk.call(bounding_box.selector_args, *inputs)]
def test_prepare_inputs(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
input_shape = mk.MagicMock()
with mk.patch.object(ModelBoundingBox, 'prepare_inputs',
autospec=True) as mkPrepare:
assert bounding_box.prepare_inputs(input_shape, [1, 2, 3]) == mkPrepare.return_value
assert mkPrepare.call_args_list == [mk.call(bounding_box[(1,)], input_shape, [1, 2, 3])]
mkPrepare.reset_mock()
assert bounding_box.prepare_inputs(input_shape, [2, 2, 3]) == mkPrepare.return_value
assert mkPrepare.call_args_list == [mk.call(bounding_box[(2,)], input_shape, [2, 2, 3])]
mkPrepare.reset_mock()
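    # Sketch (comments only) of the dispatch exercised above: a compound box
    # builds the selector key from the designated inputs, looks up the
    # matching member box, and delegates to it; `y_vals` is a placeholder:
    #
    #     cbb = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)},
    #                               Gaussian2D(), ((0, True),))
    #     cbb.prepare_inputs(shape, [1, y_vals])  # delegates to cbb[(1,)]
    #     cbb.prepare_inputs(shape, [2, y_vals])  # delegates to cbb[(2,)]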
def test__matching_bounding_boxes(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4))
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes('x', value)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4))
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes('x', value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
matching = bounding_box._matching_bounding_boxes('y', value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'y' in bbox
assert 'y' in bbox.ignored_inputs
assert 'x' in bbox
assert bbox['x'] == (-(5 - value), (5 - value))
assert len(bbox.intervals) == 1
assert bbox.ignored == [1]
# Real fix input of slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5))
}
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,
selector_args=[('slit_id', True)], order='F')
matching = bounding_box._matching_bounding_boxes('slit_id', 0)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
matching = bounding_box._matching_bounding_boxes('slit_id', 1)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 3047.5),
'y': (-0.5, 4047.5)}
assert bbox.order == 'F'
# Errors
with pytest.raises(ValueError) as err:
bounding_box._matching_bounding_boxes('slit_id', 2)
assert str(err.value) == ("Attempting to fix input slit_id, but "
"there are no bounding boxes for argument value 2.")
def test__fix_input_selector_arg(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4))
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg('x', value)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4))
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg('x', value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((1, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox_selector
assert 'x' in bbox_selector.ignored_inputs
assert 'y' in bbox_selector
assert bbox_selector['y'] == (-value, value)
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [0]
bbox = bounding_box._fix_input_selector_arg('y', value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((0, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert 'y' in bbox_selector
assert 'y' in bbox_selector.ignored_inputs
assert 'x' in bbox_selector
assert bbox_selector['x'] == (-(5 - value), (5 - value))
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [1]
# Real fix input of slicing input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5))
}
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,
selector_args=[('slit_id', True)], order='F')
bbox = bounding_box._fix_input_selector_arg('slit_id', 0)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
bbox = bounding_box._fix_input_selector_arg('slit_id', 1)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 3047.5),
'y': (-0.5, 4047.5)}
assert bbox.order == 'F'
def test__fix_input_bbox_arg(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5))
}
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,
selector_args=[('slit_id', True)], order='F')
bbox = bounding_box._fix_input_bbox_arg('x', 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [0]
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert len(bbox._bounding_boxes) == 2
bbox = bounding_box._fix_input_bbox_arg('y', 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [1]
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert len(bbox._bounding_boxes) == 2
def test_fix_inputs(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5))
}
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,
selector_args=[('slit_id', True)], order='F')
model.bounding_box = bounding_box
# Fix selector argument
new_model = fix_inputs(model, {'slit_id': 0})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
# Fix a bounding_box field
new_model = fix_inputs(model, {'x': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
new_model = fix_inputs(model, {'y': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
# Fix selector argument and a bounding_box field
new_model = fix_inputs(model, {'slit_id': 0, 'x': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
new_model = fix_inputs(model, {'y': 5, 'slit_id': 1})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'x': (-0.5, 3047.5)}
assert bbox.order == 'F'
# Fix two bounding_box fields
new_model = fix_inputs(model, {'x': 5, 'y': 7})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert bbox.selector_args == ((0, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
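    # Summary sketch (comments only) of the `fix_inputs` cases asserted above:
    # fixing the selector input collapses the compound box into the matching
    # ModelBoundingBox, while fixing a non-selector input keeps the compound
    # structure but strips that interval from every member box:
    #
    #     fix_inputs(model, {'slit_id': 0}).bounding_box  # ModelBoundingBox
    #     fix_inputs(model, {'x': 5}).bounding_box        # CompoundBoundingBox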
def test_complex_compound_bounding_box(self):
model = Identity(4)
bounding_boxes = {
(2.5, 1.3): ((-1, 1), (-3, 3)),
(2.5, 2.71): ((-3, 3), (-1, 1))
}
selector_args = (('x0', True), ('x1', True))
bbox = CompoundBoundingBox.validate(model, bounding_boxes, selector_args)
assert bbox[(2.5, 1.3)] == ModelBoundingBox(((-1, 1), (-3, 3)),
model, ignored=['x0', 'x1'])
assert bbox[(2.5, 2.71)] == ModelBoundingBox(((-3, 3), (-1, 1)),
model, ignored=['x0', 'x1'])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for spline models and fitters"""
import unittest.mock as mk
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling.core import FittableModel, ModelDefinitionError
from astropy.modeling.fitting import (
SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter)
from astropy.modeling.parameters import Parameter
from astropy.modeling.spline import Spline1D, _Spline, _SplineFitter
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
# pylint: disable=invalid-name
from astropy.utils.exceptions import AstropyUserWarning
npts = 50
nknots = 10
np.random.seed(42)
test_w = np.random.rand(npts)
test_t = [-1, 0, 1]
noise = np.random.randn(npts)
degree_tests = [1, 2, 3, 4, 5]
wieght_tests = [None, test_w]
smoothing_tests = [None, 0.01]
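# Shared fixtures for the spline tests: `npts` noisy samples, `nknots` knots,
# fixed random weights/noise, and degree/weight/smoothing grids that are
# presumably consumed by parametrized fitter tests further down the module.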
class TestSpline:
def setup_class(self):
self.num_opt = 3
self.optional_inputs = {f'test{i}': mk.MagicMock() for i in range(self.num_opt)}
self.extra_kwargs = {f'new{i}': mk.MagicMock() for i in range(self.num_opt)}
class Spline(_Spline):
optional_inputs = {'test': 'test'}
def _init_parameters(self):
super()._init_parameters()
def _init_data(self, knots, coeffs, bounds=None):
super()._init_data(knots, coeffs, bounds=bounds)
self.Spline = Spline
def test___init__(self):
# empty spline
spl = self.Spline()
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
assert not hasattr(spl, 'degree')
# Call _init_spline
with mk.patch.object(_Spline, '_init_spline',
autospec=True) as mkInit:
# No call (knots=None)
spl = self.Spline()
assert mkInit.call_args_list == []
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds)
assert mkInit.call_args_list == [mk.call(spl, knots, coeffs, bounds)]
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
# Coeffs but no knots
with pytest.raises(ValueError) as err:
self.Spline(coeffs=mk.MagicMock())
assert str(err.value) == "If one passes a coeffs vector one needs to also pass knots!"
def test_param_names(self):
# no parameters
spl = self.Spline()
assert spl.param_names == ()
knot_names = tuple(mk.MagicMock() for _ in range(3))
spl._knot_names = knot_names
assert spl.param_names == knot_names
coeff_names = tuple(mk.MagicMock() for _ in range(3))
spl._coeff_names = coeff_names
assert spl.param_names == knot_names + coeff_names
def test__optional_arg(self):
spl = self.Spline()
assert spl._optional_arg('test') == '_test'
def test__create_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert hasattr(spl, attribute)
assert getattr(spl, attribute) is None
with pytest.raises(ValueError,
match=r"Optional argument .* already exists in this class!"):
spl._create_optional_inputs()
def test__intercept_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
assert new_kwargs == self.extra_kwargs
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
new_kwargs = spl._intercept_optional_inputs(**kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is not None
assert getattr(spl, attribute) == kwargs[arg]
assert getattr(spl, attribute) != value
assert arg not in new_kwargs
assert new_kwargs == self.extra_kwargs
assert kwargs != self.extra_kwargs
with pytest.raises(RuntimeError,
match=r".* has already been set, something has gone wrong!"):
spl._intercept_optional_inputs(**kwargs)
def test_evaluate(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
spl = Spline()
        # No options passed in and no options set
new_kwargs = spl.evaluate(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
assert new_kwargs[arg] == value
for arg, value in self.extra_kwargs.items():
assert new_kwargs[arg] == value
assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs))
        # No options passed in and options set
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**kwargs)
new_kwargs = spl.evaluate(**self.extra_kwargs)
assert new_kwargs == kwargs
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
# Options passed in
set_kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**set_kwargs)
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
assert set_kwargs != kwargs
new_kwargs = spl.evaluate(**kwargs)
assert new_kwargs == kwargs
def test___call__(self):
spl = self.Spline()
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, "_intercept_optional_inputs",
autospec=True, return_value=new_kwargs) as mkIntercept:
with mk.patch.object(FittableModel, "__call__",
autospec=True) as mkCall:
assert mkCall.return_value == spl(*args, **kwargs)
assert mkCall.call_args_list == [mk.call(spl, *args, **new_kwargs)]
assert mkIntercept.call_args_list == [mk.call(spl, **kwargs)]
def test__create_parameter(self):
np.random.seed(37)
base_vec = np.random.random(20)
test = base_vec.copy()
fixed_test = base_vec.copy()
class Spline(self.Spline):
@property
def test(self):
return test
@property
def fixed_test(self):
return fixed_test
spl = Spline()
assert (spl.test == test).all()
assert (spl.fixed_test == fixed_test).all()
for index in range(20):
name = f"test_name{index}"
spl._create_parameter(name, index, 'test')
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is False
assert param.value == test[index] == spl.test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.test[index] == new_set
assert spl.test[index] != base_vec[index]
new_get = np.random.random()
spl.test[index] = new_get
assert param.value == new_get
assert param.value != new_set
for index in range(20):
name = f"fixed_test_name{index}"
spl._create_parameter(name, index, 'fixed_test', True)
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is True
assert param.value == fixed_test[index] == spl.fixed_test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.fixed_test[index] == new_set
assert spl.fixed_test[index] != base_vec[index]
new_get = np.random.random()
spl.fixed_test[index] = new_get
assert param.value == new_get
assert param.value != new_set
def test__create_parameters(self):
np.random.seed(37)
test = np.random.random(20)
class Spline(self.Spline):
@property
def test(self):
return test
spl = Spline()
fixed = mk.MagicMock()
with mk.patch.object(_Spline, '_create_parameter',
autospec=True) as mkCreate:
params = spl._create_parameters("test_param", "test", fixed)
assert params == tuple(f"test_param{idx}" for idx in range(20))
assert mkCreate.call_args_list == [
mk.call(spl, f"test_param{idx}", idx, 'test', fixed) for idx in range(20)
]
def test__init_parameters(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_parameters()
assert str(err.value) == "This needs to be implemented"
def test__init_data(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "This needs to be implemented"
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "This needs to be implemented"
def test__init_spline(self):
spl = self.Spline()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
with mk.patch.object(_Spline, "_init_parameters",
autospec=True) as mkParameters:
with mk.patch.object(_Spline, "_init_data",
autospec=True) as mkData:
main = mk.MagicMock()
main.attach_mock(mkParameters, 'parameters')
main.attach_mock(mkData, 'data')
spl._init_spline(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.data(spl, knots, coeffs, bounds=bounds),
mk.call.parameters(spl)
]
def test__init_tck(self):
spl = self.Spline()
assert spl._c is None
assert spl._t is None
assert spl._degree is None
spl = self.Spline(degree=4)
assert spl._c is None
assert spl._t is None
assert spl._degree == 4
@pytest.mark.skipif('not HAS_SCIPY')
class TestSpline1D:
def setup_class(self):
def func(x, noise=0):
return np.exp(-x**2) + 0.1*noise
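        # npts, noise, and nknots are module-level test constants assumed to
        # be defined earlier in this file.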
self.x = np.linspace(-3, 3, npts)
self.y = func(self.x, noise)
self.truth = func(self.x)
arg_sort = np.argsort(self.x)
np.random.shuffle(arg_sort)
self.x_s = self.x[arg_sort]
self.y_s = func(self.x_s, noise[arg_sort])
self.npts_out = 1000
self.xs = np.linspace(-3, 3, self.npts_out)
self.t = np.linspace(-3, 3, nknots)[1:-1]
def check_parameter(self, spl, base_name, name, index, value, fixed):
assert base_name in name
assert index == int(name.split(base_name)[-1])
knot_name = f"{base_name}{index}"
assert knot_name == name
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.name == name
assert param.value == value(index)
assert param.model == spl
assert param.fixed is fixed
def check_parameters(self, spl, params, base_name, value, fixed):
for idx, name in enumerate(params):
self.check_parameter(spl, base_name, name, idx, value, fixed)
def update_parameters(self, spl, knots, value):
for name in knots:
param = getattr(spl, name)
param.value = value
assert param.value == value
def test___init__with_no_knot_information(self):
spl = Spline1D()
assert spl._degree == 3
assert spl._user_knots is False
assert spl._t is None
assert spl._c is None
assert spl._nu is None
# Check no parameters created
assert len(spl._knot_names) == 0
assert len(spl._coeff_names) == 0
def test___init__with_number_of_knots(self):
spl = Spline1D(knots=10)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is False
assert spl._nu is None
# Check vector data
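        # 10 interior knots plus (degree + 1) = 4 boundary knots on each side
        # gives 18 knots in total.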
assert len(spl._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert len(spl._c) == 18
assert (spl._c == np.zeros(18)).all()
# Check all parameter names created:
assert len(spl._knot_names) == 18
assert len(spl._coeff_names) == 18
# Check knot values:
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values:
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_full_custom_knots(self):
t = 17*np.arange(20) - 32
spl = Spline1D(knots=t)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == np.zeros(20)).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_interior_custom_knots(self):
t = np.arange(1, 20)
spl = Spline1D(knots=t, bounds=[0, 20])
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert len(spl._t) == 27
assert (spl._t[4:-4] == t).all()
assert (spl._t[:4] == 0).all()
assert (spl._t[-4:] == 20).all()
assert len(spl._c) == 27
assert (spl._c == np.zeros(27)).all()
# Check knot values:
def value0(idx):
if idx < 4:
return 0
elif idx >= 19 + 4:
return 20
else:
return t[idx-4]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_user_knots_and_coefficients(self):
t = 17*np.arange(20) - 32
c = np.linspace(-1, 1, 20)
spl = Spline1D(knots=t, coeffs=c)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == c).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__errors(self):
# Bad knot type
knots = 3.5
with pytest.raises(ValueError) as err:
Spline1D(knots=knots)
assert str(err.value) == f"Knots: {knots} must be iterable or value"
# Not enough knots
for idx in range(8):
with pytest.raises(ValueError) as err:
Spline1D(knots=np.arange(idx))
assert str(err.value) == "Must have at least 8 knots."
# Bad scipy spline
t = np.arange(20)[::-1]
with pytest.raises(ValueError):
Spline1D(knots=t)
def test_parameter_array_link(self):
spl = Spline1D(10)
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check knot vector -> knot parameter link
t = np.arange(18)
spl._t = t.copy()
def value1(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value1, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl, spl._knot_names, 3)
assert (spl._t[:] == 3).all()
# Check coeff base values
def value2(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value2, False)
# Check coeff vector -> coeff parameter link
c = 5 * np.arange(18) + 18
spl._c = c.copy()
def value3(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value3, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl, spl._coeff_names, 4)
assert (spl._c[:] == 4).all()
def test_two_splines(self):
spl0 = Spline1D(knots=10)
spl1 = Spline1D(knots=15, degree=2)
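        # The two instances must keep independent knot/coeff vectors and
        # parameter names; updates to one spline must not leak into the other.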
assert spl0._degree == 3
assert len(spl0._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl0._t == t).all()
assert len(spl0._c) == 18
assert (spl0._c == np.zeros(18)).all()
assert spl1._degree == 2
assert len(spl1._t) == 21
t = np.zeros(21)
t[-3:] = 1
assert (spl1._t == t).all()
assert len(spl1._c) == 21
assert (spl1._c == np.zeros(21)).all()
# Check all knot names created
assert len(spl0._knot_names) == 18
assert len(spl1._knot_names) == 21
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl0, spl0._knot_names, "knot", value0, True)
def value1(idx):
if idx < 21 - 3:
return 0
else:
return 1
self.check_parameters(spl1, spl1._knot_names, "knot", value1, True)
# Check knot vector -> knot parameter link
t0 = 7 * np.arange(18) + 27
t1 = 11 * np.arange(21) + 19
spl0._t[:] = t0.copy()
spl1._t[:] = t1.copy()
def value2(idx):
return t0[idx]
self.check_parameters(spl0, spl0._knot_names, "knot", value2, True)
def value3(idx):
return t1[idx]
self.check_parameters(spl1, spl1._knot_names, "knot", value3, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl0, spl0._knot_names, 3)
self.update_parameters(spl1, spl1._knot_names, 4)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
# Check all coeff names created
assert len(spl0._coeff_names) == 18
assert len(spl1._coeff_names) == 21
# Check coeff base values
def value4(idx):
return 0
self.check_parameters(spl0, spl0._coeff_names, "coeff", value4, False)
self.check_parameters(spl1, spl1._coeff_names, "coeff", value4, False)
# Check coeff vector -> coeff parameter link
c0 = 17 * np.arange(18) + 14
c1 = 37 * np.arange(21) + 47
spl0._c[:] = c0.copy()
spl1._c[:] = c1.copy()
def value5(idx):
return c0[idx]
self.check_parameters(spl0, spl0._coeff_names, "coeff", value5, False)
def value6(idx):
return c1[idx]
self.check_parameters(spl1, spl1._coeff_names, "coeff", value6, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl0, spl0._coeff_names, 5)
self.update_parameters(spl1, spl1._coeff_names, 6)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
assert (spl0._c[:] == 5).all()
assert (spl1._c[:] == 6).all()
def test__knot_names(self):
# no parameters
spl = Spline1D()
assert spl._knot_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._knot_names == tuple(knot_names)
def test__coeff_names(self):
# no parameters
spl = Spline1D()
assert spl._coeff_names == ()
# some parameters
coeff_names = [f"coeff{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._coeff_names == tuple(coeff_names)
def test_param_names(self):
# no parameters
spl = Spline1D()
assert spl.param_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
coeff_names = [f"coeff{idx}" for idx in range(18)]
param_names = knot_names + coeff_names
spl = Spline1D(10)
assert spl.param_names == tuple(param_names)
def test_t(self):
# no parameters
spl = Spline1D()
# test get
assert spl._t is None
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
# test set
with pytest.raises(ValueError) as err:
spl.t = mk.MagicMock()
assert str(err.value) == "The model parameters must be initialized before setting knots."
# with parameters
spl = Spline1D(10)
# test get
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert (spl.t == t).all()
# test set
spl.t = (np.arange(18) + 15)
assert (spl._t == (np.arange(18) + 15)).all()
assert (spl.t == (np.arange(18) + 15)).all()
assert (spl.t != t).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.t = np.arange(idx)
assert str(err.value) == "There must be exactly as many knots as previously defined."
def test_c(self):
# no parameters
spl = Spline1D()
# test get
assert spl._c is None
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
# test set
with pytest.raises(ValueError) as err:
spl.c = mk.MagicMock()
assert str(err.value) == "The model parameters must be initialized before setting coeffs."
# with parameters
spl = Spline1D(10)
# test get
assert (spl._c == np.zeros(18)).all()
assert (spl.c == np.zeros(18)).all()
# test set
spl.c = (np.arange(18) + 15)
assert (spl._c == (np.arange(18) + 15)).all()
assert (spl.c == (np.arange(18) + 15)).all()
assert (spl.c != np.zeros(18)).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.c = np.arange(idx)
assert str(err.value) == "There must be exactly as many coeffs as previously defined."
def test_degree(self):
# default degree
spl = Spline1D()
# test get
assert spl._degree == 3
assert spl.degree == 3
# test set
# non-default degree
spl = Spline1D(degree=2)
# test get
assert spl._degree == 2
assert spl.degree == 2
def test__initialized(self):
# no parameters
spl = Spline1D()
assert spl._initialized is False
# with parameters
spl = Spline1D(knots=10, degree=2)
assert spl._initialized is True
def test_tck(self):
# no parameters
spl = Spline1D()
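        # tck mirrors scipy's (knots, coefficients, degree) tuple form.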
# test get
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
assert spl.degree == 3
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
spl.tck = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
# test get
t = np.zeros(16)
t[-3:] = 1
assert (spl.t == t).all()
assert (spl.c == np.zeros(16)).all()
assert spl.degree == 2
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
t = 5*np.arange(16) + 11
c = 7*np.arange(16) + 13
k = 2
spl.tck = (t, c, k)
assert (spl.t == t).all()
assert (spl.c == c).all()
assert spl.degree == k
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# Error
with pytest.raises(ValueError) as err:
spl.tck = (t, c, 4)
assert str(err.value) == "tck has incompatible degree!"
def test_bspline(self):
from scipy.interpolate import BSpline
# no parameters
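        # The bspline property round-trips with scipy: reading builds a
        # BSpline from tck, and assigning a BSpline (or a tck tuple) populates
        # the model's knot/coeff parameters.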
spl = Spline1D()
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
def value0(idx):
return t[idx]
def value1(idx):
return c[idx]
# set (bspline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
bspline = BSpline(t, c, k)
spl.bspline = bspline
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# set (tuple spline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
spl.bspline = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
def test_knots(self):
# no parameters
spl = Spline1D()
assert spl.knots == []
# with parameters
spl = Spline1D(10)
knots = spl.knots
assert len(knots) == 18
for knot in knots:
assert isinstance(knot, Parameter)
assert hasattr(spl, knot.name)
assert getattr(spl, knot.name) == knot
def test_coeffs(self):
# no parameters
spl = Spline1D()
assert spl.coeffs == []
# with parameters
spl = Spline1D(10)
coeffs = spl.coeffs
assert len(coeffs) == 18
for coeff in coeffs:
assert isinstance(coeff, Parameter)
assert hasattr(spl, coeff.name)
assert getattr(spl, coeff.name) == coeff
def test__init_parameters(self):
spl = Spline1D()
with mk.patch.object(Spline1D, '_create_parameters',
autospec=True) as mkCreate:
spl._init_parameters()
assert mkCreate.call_args_list == [
mk.call(spl, "knot", "t", fixed=True),
mk.call(spl, "coeff", "c")
]
def test__init_bounds(self):
spl = Spline1D()
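        # With no bounds given, the default bounds (0, 1) are replicated
        # (degree + 1) = 4 times for the boundary knots.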
has_bounds, lower, upper = spl._init_bounds()
assert has_bounds is False
assert (lower == [0, 0, 0, 0]).all()
assert (upper == [1, 1, 1, 1]).all()
assert spl._user_bounding_box is None
has_bounds, lower, upper = spl._init_bounds((-5, 5))
assert has_bounds is True
assert (lower == [-5, -5, -5, -5]).all()
assert (upper == [5, 5, 5, 5]).all()
assert spl._user_bounding_box == (-5, 5)
def test__init_knots(self):
np.random.seed(19)
lower = np.random.random(4)
upper = np.random.random(4)
# Integer
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._t is None
spl._init_knots(10, mk.MagicMock(), lower, upper)
t = np.concatenate((lower, np.zeros(10), upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, True, lower, upper)
t = np.concatenate((lower, knots, upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with no bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, False, lower, upper)
assert (spl._t == knots).all()
assert mkBspline.call_args_list == [mk.call()]
        # error: fewer than the required 8 knots
for num in range(8):
knots = np.random.random(num)
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(knots, False, lower, upper)
assert str(err.value) == "Must have at least 8 knots."
        # error: a float is neither an integer knot count nor an iterable
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(0.5, False, lower, upper)
assert str(err.value) == "Knots: 0.5 must be iterable or value"
def test__init_coeffs(self):
np.random.seed(492)
# No coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._c is None
spl._t = [1, 2, 3, 4]
spl._init_coeffs()
assert (spl._c == [0, 0, 0, 0]).all()
assert mkBspline.call_args_list == [mk.call()]
# Some coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
coeffs = np.random.random(10)
spl = Spline1D()
assert spl._c is None
spl._init_coeffs(coeffs)
assert (spl._c == coeffs).all()
assert mkBspline.call_args_list == [mk.call()]
def test__init_data(self):
spl = Spline1D()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
has_bounds = mk.MagicMock()
lower = mk.MagicMock()
upper = mk.MagicMock()
with mk.patch.object(Spline1D, '_init_bounds', autospec=True,
return_value=(has_bounds, lower, upper)) as mkBounds:
with mk.patch.object(Spline1D, '_init_knots',
autospec=True) as mkKnots:
with mk.patch.object(Spline1D, '_init_coeffs',
autospec=True) as mkCoeffs:
main = mk.MagicMock()
main.attach_mock(mkBounds, 'bounds')
main.attach_mock(mkKnots, 'knots')
main.attach_mock(mkCoeffs, 'coeffs')
spl._init_data(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.bounds(spl, bounds),
mk.call.knots(spl, knots, has_bounds, lower, upper),
mk.call.coeffs(spl, coeffs)
]
def test_evaluate(self):
spl = Spline1D()
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value=new_kwargs) as mkEval:
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
assert mkBspline.return_value.return_value == spl.evaluate(*args, **kwargs)
assert mkBspline.return_value.call_args_list == [mk.call(args[0], **new_kwargs)]
assert mkBspline.call_args_list == [mk.call()]
assert mkEval.call_args_list == [mk.call(spl, *args, **kwargs)]
# Error
for idx in range(5, 8):
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value={'nu': idx}):
with pytest.raises(RuntimeError) as err:
spl.evaluate(*args, **kwargs)
assert str(err.value) == "Cannot evaluate a derivative of order higher than 4"
def check_knots_created(self, spl, k):
def value0(idx):
return self.x[0]
def value1(idx):
return self.x[-1]
for idx in range(k + 1):
name = f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value0, True)
index = len(spl.t) - (k + 1) + idx
name = f"knot{index}"
self.check_parameter(spl, "knot", name, index, value1, True)
def value3(idx):
return spl.t[idx]
assert len(spl._knot_names) == len(spl.t)
for idx, name in enumerate(spl._knot_names):
assert name == f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value3, True)
def check_coeffs_created(self, spl):
def value(idx):
return spl.c[idx]
assert len(spl._coeff_names) == len(spl.c)
for idx, name in enumerate(spl._coeff_names):
assert name == f"coeff{idx}"
self.check_parameter(spl, "coeff", name, idx, value, False)
@staticmethod
def check_base_spline(spl, t, c, k):
"""Check the base spline form"""
if t is None:
assert spl._t is None
else:
assert_allclose(spl._t, t)
if c is None:
assert spl._c is None
else:
assert_allclose(spl._c, c)
assert spl.degree == k
assert spl._bounding_box is None
def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth):
"""Check the spline fit"""
assert_allclose(fit_spl.t, spline._eval_args[0])
assert_allclose(fit_spl.c, spline._eval_args[1])
assert_allclose(fitter.fit_info['spline']._eval_args[0], spline._eval_args[0])
assert_allclose(fitter.fit_info['spline']._eval_args[1], spline._eval_args[1])
# check that _parameters are correct
assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl._parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl._parameters[len(fit_spl.t):], fit_spl.c)
# check that parameters are correct
assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl.parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl.parameters[len(fit_spl.t):], fit_spl.c)
assert_allclose(spline.get_residual(), fitter.fit_info['resid'])
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), fitter.fit_info['spline'](self.x))
assert_allclose(fit_spl(self.x), self.y, atol=atol_fit)
assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth)
def check_bbox(self, spl, fit_spl, fitter, w, **kwargs):
"""Check the spline fit with bbox option"""
bbox = [self.x[0], self.x[-1]]
bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs)
assert bbox_spl.bounding_box == tuple(bbox)
assert_allclose(fit_spl.t, bbox_spl.t)
assert_allclose(fit_spl.c, bbox_spl.c)
def check_knots_warning(self, fitter, knots, k, w, **kwargs):
"""Check that the knots warning is raised"""
spl = Spline1D(knots=knots, degree=k)
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, weights=w, **kwargs)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_interpolate_fitter(self, w, k):
fitter = SplineInterpolateFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, None, None, k)
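        # An interpolating spline through len(self.x) points has
        # len(self.x) + k + 1 knots.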
assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline
spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert spline.get_residual() == 0
self.check_spline_fit(fit_spl, spline, fitter, 0, 1)
self.check_bbox(spl, fit_spl, fitter, w)
knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1)
self.check_knots_warning(fitter, knots, k, w)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_smoothing_fitter(self, w, k, s):
fitter = SplineSmoothingFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
# test warning
knots = fit_spl.t.copy()
self.check_knots_warning(fitter, knots, k, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_exact_knots_fitter(self, w, k):
fitter = SplineExactKnotsFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert_allclose(spline.get_residual(), 0.1, atol=1)
assert_allclose(fitter.fit_info['spline'].get_residual(), 0.1, atol=1)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w)
# Pass knots via fitter function
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# pass no knots
spl = Spline1D(degree=k)
with pytest.raises(RuntimeError) as err:
fitter(spl, self.x, self.y, weights=w)
assert str(err.value) == "No knots have been provided"
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_splrep_fitter_no_knots(self, w, k, s):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, s=s, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_splrep_fitter_with_knots(self, w, k):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, t=knots, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w)
# test warning
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# With no knots present
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck = splrep(self.x, self.y, w=w, k=k, t=knots)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, t=knots)
def generate_spline(self, w=None, bbox=[None]*2, k=None, s=None, t=None):
if k is None:
k = 3
from scipy.interpolate import BSpline, splrep
tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1],
k=k, s=s, t=t)
return BSpline(*tck)
def test_derivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
assert_allclose(spl.t, bspline.t)
assert_allclose(spl.c, bspline.c)
assert spl.degree == bspline.k
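        # Each derivative lowers the spline degree by one: k = 3 -> 2 -> 1 -> 0.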
# 1st derivative
d_bspline = bspline.derivative(nu=1)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))
der = spl.derivative()
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 2
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))
# 2nd derivative
d_bspline = bspline.derivative(nu=2)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))
der = spl.derivative(nu=2)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 1
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))
# 3rd derivative
d_bspline = bspline.derivative(nu=3)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))
der = spl.derivative(nu=3)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 0
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))
# Too many derivatives
for nu in range(4, 9):
with pytest.raises(ValueError) as err:
spl.derivative(nu=nu)
assert str(err.value) == "Must have nu <= 3"
def test_antiderivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
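        # Each antiderivative raises the spline degree by one (k = 3 -> 4 -> 5);
        # supported splines max out at degree 5, so only nu <= 2 is allowed here.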
# 1st antiderivative
a_bspline = bspline.antiderivative(nu=1)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))
anti = spl.antiderivative()
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 4
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))
# 2nd antiderivative
a_bspline = bspline.antiderivative(nu=2)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))
anti = spl.antiderivative(nu=2)
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 5
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))
# Too many anti derivatives
for nu in range(3, 9):
with pytest.raises(ValueError) as err:
spl.antiderivative(nu=nu)
assert str(err.value) == ("Supported splines can have max degree 5, "
f"antiderivative degree will be {nu + 3}")
def test__SplineFitter_error(self):
spl = Spline1D()
class SplineFitter(_SplineFitter):
def _fit_method(self, model, x, y, **kwargs):
super()._fit_method(model, x, y, **kwargs)
fitter = SplineFitter()
with pytest.raises(ValueError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "1D model can only have 2 data points."
with pytest.raises(ModelDefinitionError) as err:
fitter(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "Only spline models are compatible with this fitter."
with pytest.raises(NotImplementedError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "This has not been implemented for _SplineFitter."
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
# pylint: disable=invalid-name
import functools
import itertools
import unittest.mock as mk
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import FittableModel, Model
from astropy.modeling.parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from astropy.utils.data import get_pkg_data_filename
from . import irafutil
def setter1(val):
return val
def setter2(val, model):
model.do_something(val)
return val * model.p
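# A setter may take either one argument (the value) or two (the value and the
# model); two-argument setters are bound to the model instance when the
# parameter is attached (see test__create_value_wrapper below).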
class SetterModel(FittableModel):
n_inputs = 2
n_outputs = 1
xc = Parameter(default=1, setter=setter1)
yc = Parameter(default=1, setter=setter2)
def do_something(self, v):
pass
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return (x - xc)**2 + (y - yc)**2
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name='alpha', default=42)
@staticmethod
def evaluate(*args):
pass
def test__tofloat():
# iterable
value = _tofloat([1, 2, 3])
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
value = _tofloat(np.array([1, 2, 3]))
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
with pytest.raises(InputParameterError) as err:
_tofloat('test')
assert str(err.value) == "Parameter of <class 'str'> could not be converted to float"
# quantity
assert _tofloat(1 * u.m) == 1 * u.m
# dimensions/scalar array
value = _tofloat(np.asanyarray(3))
assert isinstance(value, float)
assert value == 3
# A regular number
value = _tofloat(3)
assert isinstance(value, float)
assert value == 3
value = _tofloat(3.0)
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float64(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int64(3))
assert isinstance(value, float)
assert value == 3
# boolean
message = "Expected parameter to be of numerical type, not boolean"
with pytest.raises(InputParameterError) as err:
_tofloat(True)
assert str(err.value) == message
with pytest.raises(InputParameterError) as err:
_tofloat(False)
assert str(err.value) == message
# other
class Value:
pass
with pytest.raises(InputParameterError) as err:
_tofloat(Value)
assert str(err.value) == "Don't know how to convert parameter of <class 'type'> to float"
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
p = Parameter('alpha', default=1)
assert p.name == 'alpha'
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = 'beta'
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
par = Parameter('alpha', default=42)
num = 42.
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par ** val == num ** val
assert val ** par == val ** num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
# Test inherited models
class M1(Model):
m1a = Parameter(default=1.)
m1b = Parameter(default=5.)
def evaluate():
pass
class M2(M1):
m2c = Parameter(default=11.)
class M3(M2):
m3d = Parameter(default=20.)
def test_parameter_inheritance():
mod = M3()
assert mod.m1a == 1.
assert mod.m1b == 5.
assert mod.m2c == 11.
assert mod.m3d == 20.
for key in ['m1a', 'm1b', 'm2c', 'm3d']:
assert key in mod.__dict__
assert mod.param_names == ('m1a', 'm1b', 'm2c', 'm3d')
def test_param_metric():
mod = M3()
assert mod._param_metrics['m1a']['slice'] == slice(0, 1)
assert mod._param_metrics['m1b']['slice'] == slice(1, 2)
assert mod._param_metrics['m2c']['slice'] == slice(2, 3)
assert mod._param_metrics['m3d']['slice'] == slice(3, 4)
mod._parameters_to_array()
assert (mod._parameters == np.array([1., 5., 11., 20], dtype=np.float64)).all()
class TestParameters:
def setup_class(self):
"""
Unit tests for parameters
        Read an IRAF database file created by onedspec.identify. Use the
        information to create a 1D Chebyshev model and perform the same fit.
        Also create a Gaussian model.
"""
test_file = get_pkg_data_filename('data/idcompspec.fits')
        with open(test_file) as f:
            lines = f.read()
        reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields['order'])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30., 40., 50., 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
        parameter (in this case c0) is updated.
"""
self.model.parameters = [0, 0., 0., 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0., 0., 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
tpar = self.model.parameters
tpar[0] = 10.
self.model.parameters = tpar
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
with pytest.raises(InputParameterError):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
        parameter (in this case c0) with a sequence of the wrong size.
"""
with pytest.raises(InputParameterError):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
with pytest.raises(InputParameterError):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test the fitter modifies model.parameters.
Uses an iraf example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
np.testing.assert_allclose(
new_model.parameters,
np.array([4826.1066602783685, 952.8943813407858, 12.641236013982386,
-1.7910672553339604, 0.90252884366711317]),
rtol=10 ** (-2))
def testPolynomial1D(self):
d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
p1 = models.Polynomial1D(3, **d)
np.testing.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
np.testing.assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
0, 0, 0, 0, 0, 0])
np.testing.assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
np.testing.assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
np.testing.assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
np.testing.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
p2 = models.Polynomial2D(2, **kw)
np.testing.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5,
1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
np.testing.assert_array_equal(sc1.factor.value, [3, 3])
def test_bounds(self):
# Valid __init__
param = Parameter(bounds=(1, 2))
assert param.bounds == (1, 2)
param = Parameter(min=1, max=2)
assert param.bounds == (1, 2)
# Errors __init__
message = ("bounds may not be specified simultaneously with min or max"
" when instantiating Parameter test")
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), min=1, name='test')
assert str(err.value) == message
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), max=2, name='test')
assert str(err.value) == message
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), min=1, max=2, name='test')
assert str(err.value) == message
# Setters
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.bounds == (None, None) == param._bounds
# Set errors
with pytest.raises(TypeError) as err:
param.bounds = ('test', None)
assert str(err.value) == "Min value must be a number or a Quantity"
with pytest.raises(TypeError) as err:
param.bounds = (None, 'test')
assert str(err.value) == "Max value must be a number or a Quantity"
# Set number
param.bounds = (1, 2)
assert param.bounds == (1, 2) == param._bounds
# Set Quantity
param.bounds = (1 * u.m, 2 * u.m)
assert param.bounds == (1, 2) == param._bounds
def test_modify_value(self):
param = Parameter(name='test', default=[1, 2, 3])
assert (param.value == [1, 2, 3]).all()
# Errors
with pytest.raises(InputParameterError) as err:
param[slice(0, 0)] = 2
assert str(err.value) == "Slice assignment outside the parameter dimensions for 'test'"
with pytest.raises(InputParameterError) as err:
param[3] = np.array([5])
assert str(err.value) == "Input dimension 3 invalid for 'test' parameter with dimension 1"
# assignment of a slice
param[slice(0, 2)] = [4, 5]
assert (param.value == [4, 5, 3]).all()
# assignment of a value
param[2] = 6
assert (param.value == [4, 5, 6]).all()
def test__set_unit(self):
param = Parameter(name='test', default=[1, 2, 3])
assert param.unit is None
# No force Error (no existing unit)
with pytest.raises(ValueError) as err:
param._set_unit(u.m)
assert str(err.value) == ("Cannot attach units to parameters that were "
"not initially specified with units")
# Force
param._set_unit(u.m, True)
assert param.unit == u.m
# No force Error (existing unit)
with pytest.raises(ValueError) as err:
param._set_unit(u.K)
assert str(err.value) == ("Cannot change the unit attribute directly, instead change the "
"parameter to a new quantity")
def test_quantity(self):
param = Parameter(name='test', default=[1, 2, 3])
assert param.unit is None
assert param.quantity is None
param = Parameter(name='test', default=[1, 2, 3], unit=u.m)
assert param.unit == u.m
assert (param.quantity == np.array([1, 2, 3]) * u.m).all()
def test_shape(self):
# Array like
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.shape == (4,)
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == "cannot reshape array of size 4 into shape (5,)"
# Reshape success
param.shape = (2, 2)
assert param.shape == (2, 2)
assert (param.value == [[1, 2], [3, 4]]).all()
# Scalar
param = Parameter(name='test', default=1)
assert param.shape == ()
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == "Cannot assign this shape to a scalar quantity"
param.shape = (1,)
# single value
param = Parameter(name='test', default=np.array([1]))
assert param.shape == (1,)
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == "Cannot assign this shape to a scalar quantity"
param.shape = ()
def test_size(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.size == 4
param = Parameter(name='test', default=[1])
assert param.size == 1
param = Parameter(name='test', default=1)
assert param.size == 1
def test_std(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.std is None
assert param._std is None
param.std = 5
assert param.std == 5 == param._std
def test_fixed(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.fixed is False
assert param._fixed is False
# Set error
with pytest.raises(ValueError) as err:
param.fixed = 3
assert str(err.value) == "Value must be boolean"
# Set
param.fixed = True
assert param.fixed is True
assert param._fixed is True
def test_tied(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.tied is False
assert param._tied is False
# Set error
with pytest.raises(TypeError) as err:
param.tied = mk.NonCallableMagicMock()
assert str(err.value) == "Tied must be a callable or set to False or None"
# Set None
param.tied = None
assert param.tied is None
assert param._tied is None
# Set False
param.tied = False
assert param.tied is False
assert param._tied is False
# Set other
tied = mk.MagicMock()
param.tied = tied
assert param.tied == tied == param._tied
def test_validator(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param._validator is None
valid = mk.MagicMock()
param.validator(valid)
assert param._validator == valid
with pytest.raises(ValueError) as err:
param.validator(mk.NonCallableMagicMock())
assert str(err.value) == ("This decorator method expects a callable.\n"
"The use of this method as a direct validator is\n"
"deprecated; use the new validate method instead\n")
def test_validate(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param._validator is None
assert param.model is None
# Run without validator
param.validate(mk.MagicMock())
# Run with validator but no Model
validator = mk.MagicMock()
param.validator(validator)
assert param._validator == validator
param.validate(mk.MagicMock())
assert validator.call_args_list == []
# Full validate
param._model = mk.MagicMock()
value = mk.MagicMock()
param.validate(value)
assert validator.call_args_list == [mk.call(param._model, value)]
def test_copy(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
copy_param = param.copy()
assert (param == copy_param).all()
assert id(param) != id(copy_param)
def test_model(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.model is None
assert param._model is None
assert param._model_required is False
assert (param._value == [1, 2, 3, 4]).all()
setter = mk.MagicMock()
getter = mk.MagicMock()
param._setter = setter
param._getter = getter
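        # Assigning a model should re-wrap the stored setter/getter through
        # _create_value_wrapper and, when a model is required, refresh the
        # stored value through the new wrappers.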
# No Model Required
param._value = [5, 6, 7, 8]
model0 = mk.MagicMock()
setter0 = mk.MagicMock()
getter0 = mk.MagicMock()
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter0, getter0]) as mkCreate:
param.model = model0
assert param.model == model0 == param._model
assert param._setter == setter0
assert param._getter == getter0
assert mkCreate.call_args_list == [
mk.call(setter, model0),
mk.call(getter, model0)
]
assert param._value == [5, 6, 7, 8]
param._setter = setter
param._getter = getter
# Model required
param._model_required = True
model1 = mk.MagicMock()
setter1 = mk.MagicMock()
getter1 = mk.MagicMock()
setter1.return_value = [9, 10, 11, 12]
getter1.return_value = [9, 10, 11, 12]
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter1, getter1]) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1)
]
assert (param.value == [9, 10, 11, 12]).all()
param._setter = setter
param._getter = getter
param._default = None
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter1, getter1]) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1)
]
assert param._value is None
def test_raw_value(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
# Normal case
assert (param._raw_value == param.value).all()
# Bad setter
param._setter = True
param._internal_value = 4
assert param._raw_value == 4
def test__create_value_wrapper(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
# Bad ufunc
with pytest.raises(TypeError) as err:
param._create_value_wrapper(np.add, mk.MagicMock())
assert str(err.value) == ("A numpy.ufunc used for Parameter getter/setter "
"may only take one input argument")
# Good ufunc
assert param._create_value_wrapper(np.negative, mk.MagicMock()) == np.negative
# None
assert param._create_value_wrapper(None, mk.MagicMock()) is None
# wrapper with one argument
def wrapper1(a):
pass
assert param._create_value_wrapper(wrapper1, mk.MagicMock()) == wrapper1
        # wrapper with two arguments
def wrapper2(a, b):
pass
# model is None
assert param._model_required is False
assert param._create_value_wrapper(wrapper2, None) == wrapper2
assert param._model_required is True
# model is not None
param._model_required = False
model = mk.MagicMock()
with mk.patch.object(functools, 'partial', autospec=True) as mkPartial:
assert param._create_value_wrapper(wrapper2, model) == mkPartial.return_value
# wrapper with more than 2 arguments
def wrapper3(a, b, c):
pass
with pytest.raises(TypeError) as err:
param._create_value_wrapper(wrapper3, mk.MagicMock())
assert str(err.value) == ("Parameter getter/setter must be a function "
"of either one or two arguments")
def test_bool(self):
# single value is true
param = Parameter(name='test', default=1)
assert param.value == 1
assert np.all(param)
if param:
assert True
else:
assert False
# single value is false
param = Parameter(name='test', default=0)
assert param.value == 0
assert not np.all(param)
if param:
assert False
else:
assert True
# vector value all true
param = Parameter(name='test', default=[1, 2, 3, 4])
assert np.all(param.value == [1, 2, 3, 4])
assert np.all(param)
if param:
assert True
else:
assert False
# vector value at least one false
param = Parameter(name='test', default=[1, 2, 0, 3, 4])
assert np.all(param.value == [1, 2, 0, 3, 4])
assert not np.all(param)
if param:
assert False
else:
assert True
def test_param_repr_oneline(self):
# Single value no units
param = Parameter(name='test', default=1)
assert param_repr_oneline(param) == '1.'
# Vector value no units
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param_repr_oneline(param) == '[1., 2., 3., 4.]'
# Single value units
param = Parameter(name='test', default=1*u.m)
assert param_repr_oneline(param) == '1. m'
# Vector value units
param = Parameter(name='test', default=[1, 2, 3, 4] * u.m)
assert param_repr_oneline(param) == '[1., 2., 3., 4.] m'
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, .1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7],
n_models=2)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
        np.testing.assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[1., 10], [3.5, 5.2], [0.4, 0.7]]))
        assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to one single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
        np.testing.assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[11., 10], [3.5, 5.2], [0.4, 0.7]]))
        assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])
np.testing.assert_almost_equal(self.gmodel.mean.value, [9., 5.2])
class TestParameterInitialization:
"""
    This suite of tests checks most, if not all, cases of instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
with pytest.raises(InputParameterError):
# Not broadcastable
TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array([[10, 20], [30, 40], [50, 60]])
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]]])
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,
1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
with pytest.raises(InputParameterError):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(('p1', 'p2'), [
(1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5])])
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2, 2)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[10, 20], [30, 40]],
[[1, 2], [3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
t2 = TParModel([[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]]])
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
def test_two_model_mixed_dimension_array_parameters(self):
with pytest.raises(InputParameterError):
# Can't broadcast different array shapes
TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]], n_models=2)
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],
[[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
def test_two_model_2d_array_parameters(self):
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[[10, 20], [30, 40]],
[[50, 60], [70, 80]]],
[[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4, 5, 6, 7, 8])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]]])
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 30, 70, 40, 80,
50, 90, 1, 3, 2, 4, 3, 5])
assert t.coeff.shape == (2, 3, 2) # note change in api
assert t.e.shape == (3, 2) # note change in api
def test_wrong_number_of_params(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
with pytest.raises(InputParameterError):
TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
with pytest.raises(InputParameterError):
TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),
model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
    Tests that a model whose 3 parameters do not all mutually broadcast
    raises InputParameterError, regardless of the order in which the
    parameters are given.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError):
TestModel(*args)
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(xc=-1, yc=3, p=np.pi)
for x, y in pars:
np.testing.assert_almost_equal(
model(x, y),
(x + 1)**2 + (y - np.pi * 3)**2)
|
aab09909534fc1531327e6da41fc63cbe186db96a8c7811c3698ebf86cc93116 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import fix_inputs
from astropy.modeling.fitting import DogBoxLSQFitter, LevMarLSQFitter, LMLSQFitter, TRFLSQFitter
from astropy.modeling.functional_models import (
AiryDisk2D, ArcCosine1D, ArcSine1D, ArcTangent1D, Box1D, Box2D, Const1D, Const2D, Cosine1D,
Disk2D, Ellipse2D, Exponential1D, Gaussian1D, Gaussian2D, KingProjectedAnalytic1D, Linear1D,
Logarithmic1D, Lorentz1D, Moffat1D, Moffat2D, Multiply, Planar2D, RickerWavelet1D,
RickerWavelet2D, Ring2D, Scale, Sersic1D, Sersic2D, Sine1D, Tangent1D, Trapezoid1D,
TrapezoidDisk2D, Voigt1D)
from astropy.modeling.parameters import InputParameterError
from astropy.modeling.physical_models import Drude1D, Plummer1D
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D, PowerLaw1D,
SmoothlyBrokenPowerLaw1D)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
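# Each entry in the model lists below describes one model to exercise:
#   'class':        the model class to instantiate
#   'parameters':   keyword arguments (with units) passed to the constructor
#   'evaluation':   tuples of input quantities followed by the expected output
#   'bounding_box': the expected bounding box, or False if the model does not
#                   implement one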
FUNC_MODELS_1D = [
{
'class': Gaussian1D,
'parameters': {'amplitude': 3 * u.Jy, 'mean': 2 * u.m, 'stddev': 30 * u.cm},
'evaluation': [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
'bounding_box': [0.35, 3.65] * u.m
},
{
'class': Sersic1D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'r_eff': 2 * u.arcsec, 'n': 4},
'evaluation': [(3 * u.arcsec, 1.3237148119468918 * u.MJy/u.sr)],
'bounding_box': False
},
{
'class': Sine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False
},
{
'class': Cosine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.25},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False
},
{
'class': Tangent1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': [-4, 0] / u.Hz
},
{
'class': ArcSine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(0 * u.km / u.s, -2 * u.s)],
'bounding_box': [-3, 3] * u.km / u.s
},
{
'class': ArcCosine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(0 * u.km / u.s, -1 * u.s)],
'bounding_box': [-3, 3] * u.km / u.s
},
{
'class': ArcTangent1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},
'evaluation': [(0 * u.km / u.s, -2 * u.s)],
'bounding_box': False
},
{
'class': Linear1D,
'parameters': {'slope': 3 * u.km / u.s, 'intercept': 5000 * u.m},
'evaluation': [(6000 * u.ms, 23 * u.km)],
'bounding_box': False
},
{
'class': Lorentz1D,
'parameters': {'amplitude': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm': 100 * u.AA},
'evaluation': [(0.51 * u.micron, 1 * u.Jy)],
'bounding_box': [255, 755] * u.nm
},
{
'class': Voigt1D,
'parameters': {'amplitude_L': 2 * u.Jy, 'x_0': 505 * u.nm,
'fwhm_L': 100 * u.AA, 'fwhm_G': 50 * u.AA},
'evaluation': [(0.51 * u.micron, 1.0621795524 * u.Jy)],
'bounding_box': False
},
{
'class': Const1D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 3 * u.Jy)],
'bounding_box': False
},
{
'class': Box1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.9, 4.9] * u.um
},
{
'class': Trapezoid1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um,
'width': 1 * u.um, 'slope': 5 * u.Jy / u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.3, 5.5] * u.um
},
{
'class': RickerWavelet1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'sigma': 1e-3 * u.mm},
'evaluation': [(1000 * u.nm, -0.09785050 * u.Jy)],
'bounding_box': [-5.6, 14.4] * u.um
},
{
'class': Moffat1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 0.238853503 * u.Jy)],
'bounding_box': False
},
{
'class': KingProjectedAnalytic1D,
'parameters': {'amplitude': 1. * u.Msun/u.pc**2, 'r_core': 1. * u.pc, 'r_tide': 2. * u.pc},
'evaluation': [(0.5 * u.pc, 0.2 * u.Msun/u.pc**2)],
'bounding_box': [0. * u.pc, 2. * u.pc]
},
{
'class': Logarithmic1D,
'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},
'evaluation': [(4 * u.m, 3.4657359027997265 * u.m)],
'bounding_box': False
},
{
'class': Exponential1D,
'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},
'evaluation': [(4 * u.m, 36.945280494653254 * u.m)],
'bounding_box': False
}
]
SCALE_MODELS = [
{
'class': Scale,
'parameters': {'factor': 2*u.m},
'evaluation': [(1*u.m, 2*u.m)],
'bounding_box': False
},
{
'class': Multiply,
'parameters': {'factor': 2*u.m},
'evaluation': [(1 * u.m/u.m, 2*u.m)],
'bounding_box': False
},
]
PHYS_MODELS_1D = [
{
'class': Plummer1D,
'parameters': {'mass': 3 * u.kg, 'r_plum': 0.5 * u.m},
'evaluation': [(1 * u.m, 0.10249381 * u.kg / (u.m ** 3))],
'bounding_box': False
},
{
'class': Drude1D,
'parameters': {'amplitude': 1.0 * u.m, 'x_0': 2175. * u.AA, 'fwhm': 400. * u.AA},
'evaluation': [(2000 * u.AA, 0.5452317018423869 * u.m)],
'bounding_box': [-17825, 22175] * u.AA
},
]
FUNC_MODELS_2D = [
{
'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-13.02230366, 15.02230366],
[-12.02230366, 16.02230366]] * u.m
},
{
'class': Const2D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
'bounding_box': False
},
{
'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m
},
{
'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m
},
{
'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.5495097567963922, 4.549509756796392],
[0.4504902432036073, 5.549509756796393]] * u.m
},
{
'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m
},
{
'class': Box2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.s,
'x_width': 4 * u.cm, 'y_width': 3 * u.s},
'evaluation': [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
'bounding_box': [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]]
},
{
'class': RickerWavelet2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False
},
{
'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False
},
{
'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False
},
{
'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False
},
{
'class': Planar2D,
'parameters': {'slope_x': 2*u.m, 'slope_y': 3*u.m, 'intercept': 4*u.m},
'evaluation': [(5*u.m/u.m, 6*u.m/u.m, 32*u.m)],
'bounding_box': False
},
]
POWERLAW_MODELS = [
{
'class': PowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1},
'evaluation': [(1 * u.m, 500 * u.g)],
'bounding_box': False
},
{
'class': BrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1},
'evaluation': [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
'bounding_box': False
},
{
'class': SmoothlyBrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm,
'alpha_1': 1, 'alpha_2': -1, 'delta': 1},
'evaluation': [(1 * u.cm, 15.125 * u.kg), (1 * u.m, 15.125 * u.kg)],
'bounding_box': False
},
{
'class': ExponentialCutoffPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'x_cutoff': 1 * u.m},
'evaluation': [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
'bounding_box': False
},
{
'class': LogParabola1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'beta': 2},
'evaluation': [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
'bounding_box': False
},
]
POLY_MODELS = [
{
'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.one, 'c1': 2 / u.m, 'c2': 3 / u.m**2},
'evaluation': [(3 * u.m, 36 * u.one)],
'bounding_box': False
},
{
'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg / u.m, 'c2': 3 * u.kg / u.m**2},
'evaluation': [(3 * u.m, 36 * u.kg)],
'bounding_box': False
},
{
'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg, 'c2': 3 * u.kg},
'evaluation': [(3 * u.one, 36 * u.kg)],
'bounding_box': False
},
{
'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.one, 'c1_0': 2 / u.m, 'c2_0': 3 / u.m**2,
'c0_1': 3 / u.s, 'c0_2': -2 / u.s**2, 'c1_1': 5 / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.one)],
'bounding_box': False
},
{
'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg,
'c1_0': 2 * u.kg / u.m, 'c2_0': 3 * u.kg / u.m**2,
'c0_1': 3 * u.kg / u.s, 'c0_2': -2 * u.kg / u.s**2,
'c1_1': 5 * u.kg / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.kg)],
'bounding_box': False
},
{
'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg, 'c2_0': 3 * u.kg,
'c0_1': 3 * u.kg, 'c0_2': -2 * u.kg, 'c1_1': 5 * u.kg},
'evaluation': [(3 * u.one, 2 * u.one, 64 * u.kg)],
'bounding_box': False
},
]
MODELS = (FUNC_MODELS_1D + SCALE_MODELS + FUNC_MODELS_2D + POWERLAW_MODELS +
PHYS_MODELS_1D + POLY_MODELS)
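# Models whose evaluation requires scipy; tests skip these when it is missing.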
SCIPY_MODELS = {Sersic1D, Sersic2D, AiryDisk2D}
# These models will fail the LevMarLSQFitter fitting test because the
# built-in fitting data produce non-finite values
NON_FINITE_LevMar_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
LogParabola1D
]
# These models will fail the TRFLSQFitter fitting test due to non-finite values
NON_FINITE_TRF_MODELS = [
ArcSine1D,
ArcCosine1D,
Sersic1D,
Sersic2D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D
]
# These models will fail the LMLSQFitter fitting test due to non-finite values
NON_FINITE_LM_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
LogParabola1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D
]
# These models will fail the DogBoxLSQFitter fitting test due to non-finite values
NON_FINITE_DogBox_MODELS = [
Sersic1D,
Sersic2D,
ArcSine1D,
ArcCosine1D,
SmoothlyBrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D
]
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = dict(zip(('x', 'y'), args))
else:
kwargs = dict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model['parameters'].items():
if value is None or key == 'degree':
params[key] = value
else:
params[key] = np.repeat(value, 2)
params['n_models'] = 2
m = model['class'](**params)
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
if model['class'] == Drude1D:
params['x_0'][-1] = 0 * u.AA
with pytest.raises(InputParameterError) as err:
model['class'](**params)
assert str(err.value) == '0 is not an allowed value for x_0'
@pytest.mark.parametrize('model', MODELS)
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
# In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
if model['bounding_box'] is False:
        # Check that NotImplementedError is raised, so that if bounding_box
        # gets implemented for this model we remember to add the expected
        # bounding box to its entry in the list of models above
with pytest.raises(NotImplementedError):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
        bbox = m.bounding_box
        if isinstance(bbox, ModelBoundingBox):
            bbox = bbox.bounding_box()
        for i in range(len(model['bounding_box'])):
            assert_quantity_allclose(bbox[i], model['bounding_box'][i])
@pytest.mark.parametrize('model', MODELS)
def test_compound_model_input_units_equivalencies_defaults(model):
m = model['class'](**model['parameters'])
assert m.input_units_equivalencies is None
compound_model = m + m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x': 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m - m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x': 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m & m
assert compound_model.inputs_map()['x1'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x0': 1})
assert fixed_input_model.inputs_map()['x1'][0].input_units_equivalencies is None
assert fixed_input_model.input_units_equivalencies is None
if m.n_outputs == m.n_inputs:
compound_model = m | m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x': 1})
assert fixed_input_model.input_units_equivalencies is None
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize('model', MODELS)
@pytest.mark.parametrize('fitter', fitters)
def test_models_fitting(model, fitter):
fitter = fitter()
if (
(isinstance(fitter, LevMarLSQFitter) and model['class'] in NON_FINITE_LevMar_MODELS) or
(isinstance(fitter, TRFLSQFitter) and model['class'] in NON_FINITE_TRF_MODELS) or
(isinstance(fitter, LMLSQFitter) and model['class'] in NON_FINITE_LM_MODELS) or
(isinstance(fitter, DogBoxLSQFitter) and model['class'] in NON_FINITE_DogBox_MODELS)
):
return
m = model['class'](**model['parameters'])
if len(model['evaluation'][0]) == 2:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.exp(-x.value ** 2) * model['evaluation'][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.linspace(1, 3, 100) * model['evaluation'][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model['evaluation'][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
            # If the parameter had no unit before the fit, allow it to come
            # back either unitless or with a radian unit (e.g. for angles)
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
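# Like the entries above, except each 'evaluation' tuple deliberately mixes
# incompatible input units (e.g. m and K) so that evaluation must raise a
# UnitsError -- see test_input_unit_mismatch_error below.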
unit_mismatch_models = [
{'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.K, 3 * u.Jy * np.exp(-0.5)),
(412.1320343 * u.K, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m},
{'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.K, 3 * u.Jy),
(4 * u.K, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
{'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.K, 3 * u.Jy),
(5.8 * u.K, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m},
{'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.K + 10 * u.K, 3 * u.Jy),
(302.05 * u.K, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
{'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.K, 1.5 * u.Jy),
(3.5 * u.K, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m},
{'class': RickerWavelet2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.K, 0.602169107 * u.Jy),
(4 * u.K, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False},
{'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.K, 4.76998480e-05 * u.Jy),
(4 * u.K, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False},
{'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.K, 0.202565833 * u.Jy),
(1000 * u.K, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False},
{'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.m, 2.829990489 * u.MJy/u.sr),
(3 * u.m, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False},
]
@pytest.mark.parametrize('model', unit_mismatch_models)
def test_input_unit_mismatch_error(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
message = "Units of 'x' and 'y' inputs should match"
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = dict(zip(('x', 'y'), args))
else:
kwargs = dict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
with pytest.raises(u.UnitsError) as err:
m.without_units_for_data(**kwargs)
assert str(err.value) == message
|
bdff4af521e02da06a941aa86c05511745f96983a5ed2ddaa66ea373d22d602a | from .iers import *
|
ae4db7c61c0ab9702090bac360fc8908a9ff56ab7c067158971743d57055424c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes a fast iterator-based XML parser.
"""
# STDLIB
import contextlib
import io
import sys
# ASTROPY
from astropy.utils import data
__all__ = ['get_xml_iterator', 'get_xml_encoding', 'xml_readlines']
@contextlib.contextmanager
def _convert_to_fd_or_read_function(fd):
"""
Returns a function suitable for streaming input, or a file object.
This function is only useful if passing off to C code where:
- If it's a real file object, we want to use it as a real
C file object to avoid the Python overhead.
- If it's not a real file object, it's much handier to just
have a Python function to call.
This is somewhat quirky behavior, of course, which is why it is
private. For a more useful version of similar behavior, see
    `astropy.utils.data.get_readable_fileobj`.
Parameters
----------
fd : object
May be:
- a file object. If the file is uncompressed, this raw
file object is returned verbatim. Otherwise, the read
method is returned.
- a function that reads from a stream, in which case it is
returned verbatim.
- a file path, in which case it is opened. Again, like a
file object, if it's uncompressed, a raw file object is
returned, otherwise its read method.
- an object with a :meth:`read` method, in which case that
method is returned.
Returns
-------
fd : context-dependent
See above.
"""
if callable(fd):
yield fd
return
with data.get_readable_fileobj(fd, encoding='binary') as new_fd:
if sys.platform.startswith('win'):
yield new_fd.read
else:
if isinstance(new_fd, io.FileIO):
yield new_fd
else:
yield new_fd.read
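# A minimal usage sketch (``votable.xml`` is a hypothetical path); callers
# must handle both shapes the context manager can yield:
#
#     with _convert_to_fd_or_read_function('votable.xml') as fd:
#         if callable(fd):
#             chunk = fd(1024)        # a read function: call it for bytes
#         else:
#             chunk = fd.read(1024)   # a real file object: use its read method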
def _fast_iterparse(fd, buffersize=2 ** 10):
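    """
    Pure-Python fallback XML iterparser: reads ``fd`` in ``buffersize``-byte
    chunks, feeds them to expat, and yields the queued start/end element
    events in the 4-tuple format documented in `get_xml_iterator`.
    """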
from xml.parsers import expat
if not callable(fd):
read = fd.read
else:
read = fd
queue = []
text = []
def start(name, attr):
queue.append((True, name, attr,
(parser.CurrentLineNumber, parser.CurrentColumnNumber)))
del text[:]
def end(name):
queue.append((False, name, ''.join(text).strip(),
(parser.CurrentLineNumber, parser.CurrentColumnNumber)))
parser = expat.ParserCreate()
parser.specified_attributes = True
parser.StartElementHandler = start
parser.EndElementHandler = end
parser.CharacterDataHandler = text.append
Parse = parser.Parse
data = read(buffersize)
while data:
Parse(data, False)
yield from queue
del queue[:]
data = read(buffersize)
Parse('', True)
yield from queue
# Try to import the C version of the iterparser, otherwise fall back
# to the Python implementation above.
_slow_iterparse = _fast_iterparse
try:
from . import _iterparser
_fast_iterparse = _iterparser.IterParser
except ImportError:
pass
@contextlib.contextmanager
def get_xml_iterator(source, _debug_python_based_parser=False):
"""
Returns an iterator over the elements of an XML file.
The iterator doesn't ever build a tree, so it is much more memory
and time efficient than the alternative in ``cElementTree``.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
parts : iterator
The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*):
- *start*: when `True` is a start element event, otherwise
an end element event.
- *tag*: The name of the element
- *data*: Depends on the value of *event*:
- if *start* == `True`, data is a dictionary of
attributes
- if *start* == `False`, data is a string containing
the text content of the element
- *pos*: Tuple (*line*, *col*) indicating the source of the
event.
"""
with _convert_to_fd_or_read_function(source) as fd:
if _debug_python_based_parser:
context = _slow_iterparse(fd)
else:
context = _fast_iterparse(fd)
yield iter(context)
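# A minimal sketch of consuming the iterator (``votable.xml`` is a
# hypothetical path):
#
#     with get_xml_iterator('votable.xml') as parts:
#         for start, tag, data, pos in parts:
#             if start:
#                 print('<{}> at line {}'.format(tag, pos[0]), data)  # attrs
#             else:
#                 print('</{}>'.format(tag), repr(data))  # text content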
def get_xml_encoding(source):
"""
Determine the encoding of an XML file by reading its header.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
encoding : str
"""
    with get_xml_iterator(source) as iterator:
        start, tag, data, pos = next(iterator)
        if not start or tag != 'xml':
            raise OSError('Invalid XML file')
        # The XML spec says that no declared encoding means utf-8
        return data.get('encoding') or 'utf-8'
def xml_readlines(source):
"""
Get the lines from a given XML file. Correctly determines the
encoding and always returns unicode.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
lines : list of unicode
"""
encoding = get_xml_encoding(source)
with data.get_readable_fileobj(source, encoding=encoding) as input:
input.seek(0)
xml_lines = input.readlines()
return xml_lines
|
5a9f3e7b2afe92c114c224c7aeb929fe04a77d42110d2657678051a25dfdde4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import locale
import os
import urllib.error
from datetime import datetime
import pytest
import numpy as np
from astropy.utils import data, misc
from astropy.io import fits
def test_isiterable():
assert misc.isiterable(2) is False
assert misc.isiterable([2]) is True
assert misc.isiterable([1, 2, 3]) is True
assert misc.isiterable(np.array(2)) is False
assert misc.isiterable(np.array([1, 2, 3])) is True
def test_signal_number_to_name_no_failure():
# Regression test for #5340: ensure signal_number_to_name throws no
    # AttributeError (it previously used ".iteritems()", which was removed in
    # Python 3).
misc.signal_number_to_name(0)
@pytest.mark.remote_data
def test_api_lookup():
try:
strurl = misc.find_api_page('astropy.utils.misc', 'dev', False,
timeout=5)
objurl = misc.find_api_page(misc, 'dev', False, timeout=5)
except urllib.error.URLError:
if os.environ.get('CI', False):
pytest.xfail('Timed out in CI')
else:
raise
assert strurl == objurl
assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc' # noqa
# Try a non-dev version
objurl = misc.find_api_page(misc, 'v3.2.1', False, timeout=3)
assert objurl == 'https://docs.astropy.org/en/v3.2.1/utils/index.html#module-astropy.utils.misc' # noqa
def test_skip_hidden():
path = data.get_pkg_data_path('data')
for root, dirs, files in os.walk(path):
assert '.hidden_file.txt' in files
assert 'local.dat' in files
# break after the first level since the data dir contains some other
# subdirectories that don't have these files
break
for root, dirs, files in misc.walk_skip_hidden(path):
assert '.hidden_file.txt' not in files
assert 'local.dat' in files
break
def test_JsonCustomEncoder():
from astropy import units as u
assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]'
assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]'
assert json.dumps({1, 2, 1}, cls=misc.JsonCustomEncoder) == '[1, 2]'
assert json.dumps(b'hello world \xc3\x85',
cls=misc.JsonCustomEncoder) == '"hello world \\u00c5"'
assert json.dumps({1: 2},
cls=misc.JsonCustomEncoder) == '{"1": 2}' # default
assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}'
# Quantities
tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp)
tmpd = {"a": {"unit": "cm", "value": 5.0}}
assert newd == tmpd
tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp2)
tmpd = {"a": {"unit": "cm", "value": [0., 1.]}}
assert newd == tmpd
tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp3)
tmpd = {"a": {"unit": "erg / s", "value": [0., 1.]}}
assert newd == tmpd
def test_JsonCustomEncoder_FITS_rec_from_files():
with fits.open(fits.util.get_testdata_filepath('variable_length_table.fits')) as hdul:
assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \
"[[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]"
with fits.open(fits.util.get_testdata_filepath('btable.fits')) as hdul:
assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \
'[[1, "Sirius", -1.4500000476837158, "A1V"], ' \
'[2, "Canopus", -0.7300000190734863, "F0Ib"], ' \
'[3, "Rigil Kent", -0.10000000149011612, "G2V"]]'
with fits.open(fits.util.get_testdata_filepath('table.fits')) as hdul:
assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \
'[["NGC1001", 11.100000381469727], ' \
'["NGC1002", 12.300000190734863], ' \
'["NGC1003", 15.199999809265137]]'
def test_set_locale():
# First, test if the required locales are available
current = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
except locale.Error as e:
pytest.skip(f'Locale error: {e}')
finally:
locale.setlocale(locale.LC_ALL, current)
date = datetime(2000, 10, 1, 0, 0, 0)
day_mon = date.strftime('%a, %b')
with misc._set_locale('en_US.utf8'):
assert date.strftime('%a, %b') == 'Sun, Oct'
with misc._set_locale('fr_FR.utf8'):
assert date.strftime('%a, %b') == 'dim., oct.'
# Back to original
assert date.strftime('%a, %b') == day_mon
with misc._set_locale(current):
assert date.strftime('%a, %b') == day_mon
def test_dtype_bytes_or_chars():
assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8
assert misc.dtype_bytes_or_chars(np.dtype(object)) is None
assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4
assert misc.dtype_bytes_or_chars(np.array(b'12345').dtype) == 5
assert misc.dtype_bytes_or_chars(np.array('12345').dtype) == 5
|
22a4668e3a745dce40d8f03f0b8dc58aaecd240cc262c190acc3a53bfd9d4852 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.utils.data_info import dtype_info_name
from astropy.table import QTable
from astropy.table.index import SlicedIndex
from astropy.time import Time
from astropy.coordinates import SkyCoord
import astropy.units as u
STRING_TYPE_NAMES = {(True, 'S'): 'bytes',
(True, 'U'): 'str'}
DTYPE_TESTS = ((np.array(b'abcd').dtype, STRING_TYPE_NAMES[(True, 'S')] + '4'),
(np.array('abcd').dtype, STRING_TYPE_NAMES[(True, 'U')] + '4'),
('S4', STRING_TYPE_NAMES[(True, 'S')] + '4'),
('U4', STRING_TYPE_NAMES[(True, 'U')] + '4'),
(np.void, 'void'),
(np.int32, 'int32'),
(bool, 'bool'),
(float, 'float64'),
('<f4', 'float32'),
('u8', 'uint64'),
('c16', 'complex128'),
('object', 'object'))
@pytest.mark.parametrize('input,output', DTYPE_TESTS)
def test_dtype_info_name(input, output):
"""
Test that dtype_info_name is giving the expected output
Here the available types::
'b' boolean
'i' (signed) integer
'u' unsigned integer
'f' floating-point
'c' complex-floating point
'O' (Python) objects
'S', 'a' (byte-)string
'U' Unicode
'V' raw data (void)
"""
assert dtype_info_name(input) == output
def test_info_no_copy_numpy():
"""Test that getting a single item from Table column object does not copy info.
See #10889.
"""
col = [1, 2]
t = QTable([col], names=['col'])
t.add_index('col')
val = t['col'][0]
# Returns a numpy scalar (e.g. np.float64) with no .info
assert isinstance(val, np.number)
with pytest.raises(AttributeError):
val.info
val = t['col'][:]
assert val.info.indices == []
cols = [[1, 2] * u.m,
Time([1, 2], format='cxcsec')]
@pytest.mark.parametrize('col', cols)
def test_info_no_copy_mixin_with_index(col):
"""Test that getting a single item from Table column object does not copy info.
See #10889.
"""
t = QTable([col], names=['col'])
t.add_index('col')
val = t['col'][0]
assert 'info' not in val.__dict__
assert val.info.indices == []
val = t['col'][:]
assert 'info' in val.__dict__
assert val.info.indices == []
val = t[:]['col']
assert 'info' in val.__dict__
assert isinstance(val.info.indices[0], SlicedIndex)
def test_info_no_copy_skycoord():
"""Test that getting a single item from Table SkyCoord column object does
not copy info. Cannot create an index on a SkyCoord currently.
"""
    col = SkyCoord([1, 2], [1, 2], unit='deg')
t = QTable([col], names=['col'])
val = t['col'][0]
assert 'info' not in val.__dict__
assert val.info.indices == []
val = t['col'][:]
assert val.info.indices == []
val = t[:]['col']
assert val.info.indices == []
|
3fce94f0cb1eaf6f155b15cba34ff25420749d6d40a134d2c7796892feb592d7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import sys
import stat
import errno
import base64
import random
import shutil
import hashlib
import pathlib
import platform
import tempfile
import warnings
import itertools
import contextlib
import urllib.error
import urllib.parse
import urllib.request
from itertools import islice
from concurrent.futures import ThreadPoolExecutor
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
from astropy import units as _u # u is taken
from astropy.config import paths
import astropy.utils.data
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.data import (
CacheMissingWarning,
CacheDamaged,
conf,
_deltemps,
compute_hash,
download_file,
cache_contents,
_tempfilestodel,
get_cached_urls,
is_url_in_cache,
cache_total_size,
get_file_contents,
check_download_cache,
clear_download_cache,
get_pkg_data_fileobj,
get_readable_fileobj,
import_file_to_cache,
export_download_cache,
get_pkg_data_contents,
get_pkg_data_filename,
import_download_cache,
get_free_space_in_dir,
check_free_space_in_dir,
_get_download_cache_loc,
download_files_in_parallel,
is_url,
get_pkg_data_path
)
CI = os.environ.get('CI', False) == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
def can_rename_directory_in_use():
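    """Check whether this OS allows renaming a directory that contains an
    open file (Windows typically does not); some cache tests depend on it.
    """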
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "wt") as f:
f.write("some contents\n")
try:
with open(f1):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
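    """Convert a local filesystem path to a file:/// URL."""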
return pathlib.Path(path).resolve().as_uri()
@pytest.fixture
def valid_urls(tmpdir):
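    """Yield (file:// URL, contents) pairs, each backed by a fresh local file."""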
def _valid_urls(tmpdir):
for i in itertools.count():
c = os.urandom(16).hex()
fn = os.path.join(tmpdir, "valid_" + str(i))
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmpdir)
@pytest.fixture
def invalid_urls(tmpdir):
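    """Yield file:// URLs that point at nonexistent local files."""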
def _invalid_urls(tmpdir):
for i in itertools.count():
fn = os.path.join(tmpdir, "invalid_" + str(i))
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmpdir)
@pytest.fixture
def temp_cache(tmpdir):
with paths.set_temp_cache(tmpdir):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
@pytest.fixture
def readonly_cache(tmpdir, valid_urls):
with TemporaryDirectory(dir=tmpdir) as d:
# other fixtures use the same tmpdir so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
@pytest.fixture
def fake_readonly_cache(tmpdir, valid_urls, monkeypatch):
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM,
"os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM,
"os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM,
"_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmpdir) as d:
# other fixtures use the same tmpdir so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(astropy.utils.data,
"_SafeTemporaryDirectory",
no_TemporaryDirectory)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
long_url = "http://"+"a"*256+".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
def test_case_collision(valid_urls, temp_cache):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmpdir):
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmpdir):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmpdir):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel([u for (u, c, c_bad) in urls],
cache=True,
sources=sources)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True),
[u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
for r, (u, c) in zip(r, urls):
assert get_file_contents(r) == c
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u),
[u for (u, c) in urls]))
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
for r, u in zip(r, urls):
if u in contents:
assert get_file_contents(r) == contents[u]
else:
assert r is None
def test_clear_download_cache(valid_urls):
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(temp_cache, tmpdir):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=str(tmpdir), delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmpdir, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmpdir / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
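# The ``sources`` tests below pin down a subtlety: a successful download is
# always cached under the *requested* URL, never under whichever source
# actually supplied the data.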
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmpdir, temp_cache):
with TemporaryDirectory(dir=tmpdir) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert get_file_contents(download_file(f_url, cache=True)) == "new", \
"Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert get_file_contents(download_file(f_url, cache=True)) == "new", \
"Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {'cafile': None, 'capath': '/does/not/exist'}
msg = f'Verification of TLS/SSL certificate at {TESTURL_SSL} failed'
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(TESTURL_SSL, cache=False,
ssl_context=ssl_context, allow_insecure=True)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmpdir, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url+s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all([os.path.isfile(f) for f in fnout]), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmpdir, valid_urls, method):
urls = []
# tmpdir is shared between many tests, and that can cause weird
# interactions if we set the temporary cache too directly
with paths.set_temp_cache(tmpdir):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = {u for (u, c) in urls}
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
for r, (u, c) in zip(r, td):
assert get_file_contents(r) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
@pytest.mark.slow
def test_download_parallel_partial_success_lock_safe(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmpdir):
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = os.path.join(tmpdir, c)
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for (fn, u, c) in td:
c_plus = c + " updated"
fn = os.path.join(tmpdir, c)
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
("filename"), ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if ((not HAS_BZ2 and "bz2" in filename) or
(not HAS_LZMA and "xz" in filename)):
with pytest.raises(ValueError) as e:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
assert " format files are not supported" in str(e.value)
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmpdir):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmpdir.join(request.param)
filename = datafile.strpath
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write(contents, mode="wb")
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(ModuleNotFoundError,
match=r'does not provide the [lb]z[2m]a? module\.'):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmpdir):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmpdir.join("tmp.dat").strpath
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
    Test that the default behavior is correct when the cache directory
    can't be located.
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
    # make sure the _find_or_create_root_dir function fails as though the
    # astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, '_find_or_create_root_dir', osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname='astropy')
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
partial_warn_msgs = ['remote data cache could not be accessed', 'temporary file']
if n_warns == 4:
partial_warn_msgs.extend(['socket', 'socket'])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert len(partial_warn_msgs) == 0, f'Got some unexpected warnings: {partial_warn_msgs}'
assert n_warns in (2, 4), f'Expected 2 or 4 warnings, got {n_warns}'
assert os.path.isfile(fnout)
    # clearing the cache should be a no-op that doesn't affect fnout
with pytest.warns(CacheMissingWarning,
match=r".*Not clearing data cache - cache inaccessible.*"):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# now remove it so tests don't clutter up the temp dir this should get
# called at exit, anyway, but we do it here just to make sure it's working
# correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
    # no warnings should be raised in fileobj because the cache is unnecessary
@pytest.mark.parametrize(
("filename"),
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0"
b"\xd7\x95\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If non-deterministic failure happens see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmpdir, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = os.path.join(tmpdir, "the.zip")
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmpdir, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = os.path.join(tmpdir, "the.zip")
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmpdir, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = os.path.join(tmpdir, "the.zip")
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmpdir, temp_cache, valid_urls):
zip_file_name = os.path.join(tmpdir, "the.zip")
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmpdir):
fn = tmpdir / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding='binary') == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding='binary') != c
def test_export_import_roundtrip_different_location(tmpdir, valid_urls):
original_cache = tmpdir / "original"
os.mkdir(original_cache)
zip_file_name = tmpdir / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = {u for (u, c) in urls}
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmpdir / "new"
os.mkdir(new_cache)
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for (u, c) in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for (u, c, h) in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize('desired_size',
[1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmpdir, desired_size):
with pytest.raises(OSError):
check_free_space_in_dir(str(tmpdir), desired_size)
def test_get_free_space_file_directory(tmpdir):
fn = tmpdir / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(str(fn))
free_space = get_free_space_in_dir(str(tmpdir))
assert free_space > 0 and not hasattr(free_space, 'unit')
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(str(tmpdir), unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(str(tmpdir), unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmpdir):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmpdir))
def test_download_file_schedules_deletion(valid_urls):
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmpdir):
fn = os.path.abspath(os.path.join(tmpdir, "file"))
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "wt") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "wt") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "wt") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "wt") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "wt") as f:
f.write("awkwardly-named bogus file that exists")
u2, c2 = next(valid_urls)
    f2 = download_file(u2, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == {bf1, bf2, bf3, bf4}
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmpdir, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
    in their directory tree, and because the cache directory is actually several
tree levels down from the directory set in the config file, it's important
to check what happens if each of the steps in the path is wrong somehow.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = str(tmpdir / "file")
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = str(tmpdir / "astropy")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = str(tmpdir / "astropy" / "download")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = str(tmpdir / "astropy" / "download" / "url")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn) as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmpdir, valid_urls):
u, c = next(valid_urls)
d1 = tmpdir / "1"
d2 = tmpdir / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = tmpdir.listdir()
# Assert that the temporary file was empty after get_readable_fileobj()
# context manager finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert f.read().rstrip() == (
"This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
def test_download_file_wrong_size(monkeypatch):
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type('MockOpener', (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmpdir):
try:
with readonly_dir(tmpdir):
assert is_dir_readonly(tmpdir)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmpdir):
fn = tmpdir / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmpdir):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmpdir):
filename = os.path.join(tmpdir, "test-file")
content = "Some text or other"
url = "http://example.com/"
with open(filename, "wt") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmpdir):
filename = os.path.join(tmpdir, "test-file")
content = "Some text or other"
url = "http://example.com/"
with open(filename, "wt") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
def test_pkgname_isolation(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW+1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW+1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW+1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW+1
assert len(get_cached_urls(pkgname=a)) == FEW-1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW-1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)],
pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.")
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.")
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache='update', sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
        # This platform can remove files that are in use, so force the
        # Windows-style failure by making _rmtree raise.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
        # This platform can remove files that are in use, so force the
        # Windows-style failure by making _rmtree raise.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache='update', sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp('allow_internet', False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), 'url'))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f)+'/')
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.all" # noqa
download_file(url)
@pytest.mark.parametrize('base', ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file('file://', cache=True, sources=[u])
assert not is_url_in_cache('file:///')
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = ['Name or service not known',
'nodename nor servname provided, or not known',
'getaddrinfo failed',
'Temporary failure in name resolution',
'No address associated with hostname']
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
('s', 'ans'),
[('http://googlecom', True),
('https://google.com', True),
('ftp://google.com', True),
('sftp://google.com', True),
('ssh://google.com', True),
('file:///c:/path/to/the%20file.txt', True),
('google.com', False),
('C:\\\\path\\\\file.docx', False),
('data://file', False)])
def test_string_is_url_check(s, ans):
assert is_url(s) is ans
|
29c4559962daaed2eda4b91ac3da247da506dc27b0c02bdea4086ccd84da6385 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from . import test_progress_bar_func
from astropy.utils import console
from astropy import units as u
class FakeTTY(io.StringIO):
"""IOStream that fakes a TTY; provide an encoding to emulate an output
stream with a specific encoding.
"""
def __new__(cls, encoding=None):
# Return a new subclass of FakeTTY with the requested encoding
if encoding is None:
return super().__new__(cls)
cls = type(encoding.title() + cls.__name__, (cls,),
{'encoding': encoding})
return cls.__new__(cls)
def __init__(self, encoding=None):
super().__init__()
def write(self, s):
if isinstance(s, bytes):
# Just allow this case to work
s = s.decode('latin-1')
elif self.encoding is not None:
s.encode(self.encoding)
return super().write(s)
def isatty(self):
return True
def test_fake_tty():
# First test without a specified encoding; we should be able to write
# arbitrary unicode strings
f1 = FakeTTY()
assert f1.isatty()
f1.write('☃')
assert f1.getvalue() == '☃'
# Now test an ASCII-only TTY--it should raise a UnicodeEncodeError when
# trying to write a string containing non-ASCII characters
f2 = FakeTTY('ascii')
assert f2.isatty()
assert f2.__class__.__name__ == 'AsciiFakeTTY'
assert pytest.raises(UnicodeEncodeError, f2.write, '☃')
assert f2.getvalue() == ''
@pytest.mark.skipif("sys.platform.startswith('win')")
def test_color_text():
assert console._color_text("foo", "green") == '\033[0;32mfoo\033[0m'
def test_color_print():
# This stuff is hard to test, at least smoke test it
console.color_print("foo", "green")
console.color_print("foo", "green", "bar", "red")
def test_color_print2():
# Test that this automatically detects that io.StringIO is
# not a tty
stream = io.StringIO()
console.color_print("foo", "green", file=stream)
assert stream.getvalue() == 'foo\n'
stream = io.StringIO()
console.color_print("foo", "green", "bar", "red", "baz", file=stream)
assert stream.getvalue() == 'foobarbaz\n'
@pytest.mark.skipif("sys.platform.startswith('win')")
def test_color_print3():
# Test that this thinks the FakeTTY is a tty and applies colors.
stream = FakeTTY()
console.color_print("foo", "green", file=stream)
assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\n'
stream = FakeTTY()
console.color_print("foo", "green", "bar", "red", "baz", file=stream)
assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\x1b[0;31mbar\x1b[0mbaz\n'
def test_color_print_unicode():
console.color_print("überbær", "red")
def test_color_print_invalid_color():
console.color_print("foo", "unknown")
def test_spinner_non_unicode_console():
"""Regression test for #1760
    Ensures that the spinner can go into fallback mode when using the
unicode spinner on a terminal whose default encoding cannot encode the
unicode characters.
"""
stream = FakeTTY('ascii')
chars = console.Spinner._default_unicode_chars
with console.Spinner("Reticulating splines", file=stream,
chars=chars) as s:
next(s)
def test_progress_bar():
# This stuff is hard to test, at least smoke test it
with console.ProgressBar(50) as bar:
for i in range(50):
bar.update()
def test_progress_bar2():
for x in console.ProgressBar(range(50)):
pass
def test_progress_bar3():
def do_nothing(*args, **kwargs):
pass
console.ProgressBar.map(do_nothing, range(50))
def test_zero_progress_bar():
with console.ProgressBar(0) as bar:
pass
def test_progress_bar_as_generator():
sum = 0
for x in console.ProgressBar(range(50)):
sum += x
assert sum == 1225
sum = 0
for x in console.ProgressBar(50):
sum += x
assert sum == 1225
def test_progress_bar_map():
items = list(range(100))
result = console.ProgressBar.map(test_progress_bar_func.func,
items, step=10, multiprocess=True)
assert items == result
result1 = console.ProgressBar.map(test_progress_bar_func.func,
items, step=10, multiprocess=2)
assert items == result1
@pytest.mark.parametrize(("seconds", "string"),
[(864088, " 1w 3d"),
(187213, " 2d 4h"),
(3905, " 1h 5m"),
(64, " 1m 4s"),
(15, " 15s"),
(2, " 2s")]
)
def test_human_time(seconds, string):
human_time = console.human_time(seconds)
assert human_time == string
@pytest.mark.parametrize(("size", "string"),
[(8640882, "8.6M"),
(187213, "187k"),
(3905, "3.9k"),
(64, " 64 "),
(2, " 2 "),
(10*u.GB, " 10G")]
)
def test_human_file_size(size, string):
human_time = console.human_file_size(size)
assert human_time == string
@pytest.mark.parametrize("size", (50*u.km, 100*u.g))
def test_bad_human_file_size(size):
assert pytest.raises(u.UnitConversionError, console.human_file_size, size)
|
2874cdd5f139057a176ee2fcd0b0f886f0d2280ea6406c94f349bd6c348ae9b6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
Generally, any new predefined class should override the
``from_unmasked(data, mask, copy=False)`` class method that
creates an instance from unmasked data and a mask, as well as
the ``unmasked`` property that returns just the data.
The `Masked` class itself provides a base ``mask`` property,
which can also be overridden if needed.
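
For instance, a predefined masked class for a hypothetical data class
``MyData`` might be declared along these lines (a sketch only; ``MyData``
is not a real class, and the keyword arguments are processed by
``Masked.__init_subclass__``)::

    class MaskedMyData(Masked, MyData, base_cls=MyData, data_cls=MyData):
        ...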
"""
import builtins
import numpy as np
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.shapes import NDArrayShapeMethods
from astropy.utils.data_info import ParentDtypeInfo
from .function_helpers import (MASKED_SAFE_FUNCTIONS,
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
__all__ = ['Masked', 'MaskedNDArray']
get__doc__ = """Masked version of {0.__name__}.
Except for the ability to pass in a ``mask``, parameters are
as for `{0.__module__}.{0.__name__}`.
""".format
class Masked(NDArrayShapeMethods):
"""A scalar value or array of values with associated mask.
The resulting instance will take its exact type from whatever the
contents are, with the type generated on the fly as needed.
Parameters
----------
data : array-like
The data for which a mask is to be added. The result will be a
        subclass of the type of ``data``.
mask : array-like of bool, optional
The initial mask to assign. If not given, taken from the data.
copy : bool
Whether the data and mask should be copied. Default: `False`.
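
    Examples
    --------
    A minimal usage sketch (the outputs are plain `~numpy.ndarray` reprs
    and are illustrative)::

        >>> import numpy as np
        >>> ma = Masked(np.array([1., 2., 3.]), mask=[False, True, False])
        >>> ma.unmasked
        array([1., 2., 3.])
        >>> ma.filled(0.)
        array([1., 0., 3.])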
"""
_base_classes = {}
"""Explicitly defined masked classes keyed by their unmasked counterparts.
For subclasses of these unmasked classes, masked counterparts can be generated.
"""
_masked_classes = {}
"""Masked classes keyed by their unmasked data counterparts."""
def __new__(cls, *args, **kwargs):
if cls is Masked:
# Initializing with Masked itself means we're in "factory mode".
if not kwargs and len(args) == 1 and isinstance(args[0], type):
# Create a new masked class.
return cls._get_masked_cls(args[0])
else:
return cls._get_masked_instance(*args, **kwargs)
else:
# Otherwise we're a subclass and should just pass information on.
return super().__new__(cls, *args, **kwargs)
def __init_subclass__(cls, base_cls=None, data_cls=None, **kwargs):
"""Register a Masked subclass.
Parameters
----------
base_cls : type, optional
If given, it is taken to mean that ``cls`` can be used as
a base for masked versions of all subclasses of ``base_cls``,
so it is registered as such in ``_base_classes``.
data_cls : type, optional
            If given, ``cls`` will be registered as the masked version of
``data_cls``. Will set the private ``cls._data_cls`` attribute,
and auto-generate a docstring if not present already.
**kwargs
Passed on for possible further initialization by superclasses.
"""
if base_cls is not None:
Masked._base_classes[base_cls] = cls
if data_cls is not None:
cls._data_cls = data_cls
cls._masked_classes[data_cls] = cls
if cls.__doc__ is None:
cls.__doc__ = get__doc__(data_cls)
super().__init_subclass__(**kwargs)
# This base implementation just uses the class initializer.
# Subclasses can override this in case the class does not work
# with this signature, or to provide a faster implementation.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
"""Create an instance from unmasked data and a mask."""
return cls(data, mask=mask, copy=copy)
@classmethod
def _get_masked_instance(cls, data, mask=None, copy=False):
data, data_mask = cls._get_data_and_mask(data)
if mask is None:
mask = False if data_mask is None else data_mask
masked_cls = cls._get_masked_cls(data.__class__)
return masked_cls.from_unmasked(data, mask, copy)
@classmethod
def _get_masked_cls(cls, data_cls):
"""Get the masked wrapper for a given data class.
If the data class does not exist yet but is a subclass of any of the
registered base data classes, it is automatically generated
(except we skip `~numpy.ma.MaskedArray` subclasses, since then the
masking mechanisms would interfere).
"""
if issubclass(data_cls, (Masked, np.ma.MaskedArray)):
return data_cls
masked_cls = cls._masked_classes.get(data_cls)
if masked_cls is None:
# Walk through MRO and find closest base data class.
# Note: right now, will basically always be ndarray, but
# one could imagine needing some special care for one subclass,
# which would then get its own entry. E.g., if MaskedAngle
# defined something special, then MaskedLongitude should depend
# on it.
for mro_item in data_cls.__mro__:
base_cls = cls._base_classes.get(mro_item)
if base_cls is not None:
break
else:
# Just hope that MaskedNDArray can handle it.
# TODO: this covers the case where a user puts in a list or so,
# but for those one could just explicitly do something like
# _masked_classes[list] = MaskedNDArray.
return MaskedNDArray
# Create (and therefore register) new Masked subclass for the
# given data_cls.
masked_cls = type('Masked' + data_cls.__name__,
(data_cls, base_cls), {}, data_cls=data_cls)
return masked_cls
@classmethod
def _get_data_and_mask(cls, data, allow_ma_masked=False):
"""Split data into unmasked and mask, if present.
Parameters
----------
data : array-like
Possibly masked item, judged by whether it has a ``mask`` attribute.
If so, checks for being an instance of `~astropy.utils.masked.Masked`
or `~numpy.ma.MaskedArray`, and gets unmasked data appropriately.
allow_ma_masked : bool, optional
Whether or not to process `~numpy.ma.masked`, i.e., an item that
implies no data but the presence of a mask.
Returns
-------
unmasked, mask : array-like
Unmasked will be `None` for `~numpy.ma.masked`.
Raises
------
ValueError
If `~numpy.ma.masked` is passed in and ``allow_ma_masked`` is not set.
"""
mask = getattr(data, 'mask', None)
if mask is not None:
try:
data = data.unmasked
except AttributeError:
if not isinstance(data, np.ma.MaskedArray):
raise
if data is np.ma.masked:
if allow_ma_masked:
data = None
else:
raise ValueError('cannot handle np.ma.masked here.') from None
else:
data = data.data
return data, mask
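# Hedged sketch of the split performed above (imports as in the earlier
# sketch; the np.ma input is just one possibility):
#
# >>> arr = np.ma.MaskedArray([1., 2.], mask=[True, False])
# >>> Masked._get_data_and_mask(arr)
# (array([1., 2.]), array([ True, False]))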
@classmethod
def _get_data_and_masks(cls, *args):
data_masks = [cls._get_data_and_mask(arg) for arg in args]
return (tuple(data for data, _ in data_masks),
tuple(mask for _, mask in data_masks))
def _get_mask(self):
"""The mask.
If set, this replaces the original mask with whatever it is set to,
using a view if no broadcasting or type conversion is required.
"""
return self._mask
def _set_mask(self, mask, copy=False):
self_dtype = getattr(self, 'dtype', None)
mask_dtype = (np.ma.make_mask_descr(self_dtype)
if self_dtype and self_dtype.names else np.dtype('?'))
ma = np.asanyarray(mask, dtype=mask_dtype)
if ma.shape != self.shape:
# This will fail (correctly) if not broadcastable.
self._mask = np.empty(self.shape, dtype=mask_dtype)
self._mask[...] = ma
elif ma is mask:
# Even if not copying use a view so that shape setting
# does not propagate.
self._mask = mask.copy() if copy else mask.view()
else:
self._mask = ma
mask = property(_get_mask, _set_mask)
# Note: subclass should generally override the unmasked property.
# This one assumes the unmasked data is stored in a private attribute.
@property
def unmasked(self):
"""The unmasked values.
See Also
--------
astropy.utils.masked.Masked.filled
"""
return self._unmasked
def filled(self, fill_value):
"""Get a copy of the underlying data, with masked values filled in.
Parameters
----------
fill_value : object
Value to replace masked values with.
See Also
--------
astropy.utils.masked.Masked.unmasked
"""
unmasked = self.unmasked.copy()
if self.mask.dtype.names:
np.ma.core._recursive_filled(unmasked, self.mask, fill_value)
else:
unmasked[self.mask] = fill_value
return unmasked
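# Illustrative sketch of filling (imports as in the earlier sketch):
#
# >>> Masked(np.array([1., 2.]), mask=[True, False]).filled(0.)
# array([0., 2.])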
def _apply(self, method, *args, **kwargs):
# Required method for NDArrayShapeMethods, to help provide __getitem__
# and shape-changing methods.
if callable(method):
data = method(self.unmasked, *args, **kwargs)
mask = method(self.mask, *args, **kwargs)
else:
data = getattr(self.unmasked, method)(*args, **kwargs)
mask = getattr(self.mask, method)(*args, **kwargs)
result = self.from_unmasked(data, mask, copy=False)
if 'info' in self.__dict__:
result.info = self.info
return result
def __setitem__(self, item, value):
value, mask = self._get_data_and_mask(value, allow_ma_masked=True)
if value is not None:
self.unmasked[item] = value
self.mask[item] = mask
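# Sketch of item assignment, including the np.ma.masked sentinel
# (imports as in the earlier sketch):
#
# >>> ma = Masked(np.arange(3.))
# >>> ma[1] = np.ma.masked  # masks the element; the data are untouched.
# >>> ma.mask
# array([False,  True, False])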
class MaskedInfoBase:
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {'fits': 'null_value',
'ecsv': 'null_value',
'hdf5': 'data_mask',
'parquet': 'data_mask',
None: 'null_value'}
class MaskedNDArrayInfo(MaskedInfoBase, ParentDtypeInfo):
"""
Container for meta information like name, description, format.
"""
# Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. This is the same as for MaskedColumn.
attr_names = ParentDtypeInfo.attr_names | {'serialize_method'}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = 'data'
def _represent_as_dict(self):
out = super()._represent_as_dict()
masked_array = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == 'data_mask':
out['data'] = masked_array.unmasked
if np.any(masked_array.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out['mask'] = masked_array.mask
elif method == 'null_value':
out['data'] = np.ma.MaskedArray(masked_array.unmasked,
mask=masked_array.mask)
else:
raise ValueError('serialize method must be either "data_mask" or "null_value"')
return out
def _construct_from_dict(self, map):
# Override usual handling, since MaskedNDArray takes shape and buffer
# as input, which is less useful here.
# The map can contain either a MaskedColumn or a Column and a mask.
# Extract the mask for the former case.
map.setdefault('mask', getattr(map['data'], 'mask', False))
return self._parent_cls.from_unmasked(**map)
class MaskedArraySubclassInfo(MaskedInfoBase):
"""Mixin class to create a subclasses such as MaskedQuantityInfo."""
# This is used below in __init_subclass__, which also inserts a
# 'serialize_method' attribute in attr_names.
def _represent_as_dict(self):
# Use the data_cls as the class name for serialization,
# so that we do not have to store all possible masked classes
# in astropy.table.serialize.__construct_mixin_classes.
out = super()._represent_as_dict()
data_cls = self._parent._data_cls
out.setdefault('__class__',
data_cls.__module__ + '.' + data_cls.__name__)
return out
def _comparison_method(op):
"""
Create a comparison operator for MaskedNDArray.
Needed since for string dtypes the base operators bypass __array_ufunc__
and hence return unmasked results.
"""
def _compare(self, other):
other_data, other_mask = self._get_data_and_mask(other)
result = getattr(self.unmasked, op)(other_data)
if result is NotImplemented:
return NotImplemented
mask = self.mask | (other_mask if other_mask is not None else False)
return self._masked_result(result, mask, None)
return _compare
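# Sketch of why this matters: comparisons on string dtypes still carry
# the mask (imports as in the earlier sketch):
#
# >>> ma = Masked(np.array(['a', 'b']), mask=[True, False])
# >>> (ma == 'b').mask
# array([ True, False])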
class MaskedIterator:
"""
Flat iterator object to iterate over Masked Arrays.
A `~astropy.utils.masked.MaskedIterator` iterator is returned by ``m.flat``
for any masked array ``m``. It allows iterating over the array as if it
were a 1-D array, either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
Notes
-----
The design of `~astropy.utils.masked.MaskedIterator` follows that of
`~numpy.ma.core.MaskedIterator`. It is not exported by the
`~astropy.utils.masked` module. Instead of instantiating directly,
use the ``flat`` method in the masked array instance.
"""
def __init__(self, m):
self._masked = m
self._dataiter = m.unmasked.flat
self._maskiter = m.mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
mask = self._maskiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Masked array.
if not isinstance(out, np.ndarray):
out = out[...]
mask = mask[...]
return self._masked.from_unmasked(out, mask, copy=False)
def __setitem__(self, index, value):
data, mask = self._masked._get_data_and_mask(value, allow_ma_masked=True)
if data is not None:
self._dataiter[index] = data
self._maskiter[index] = mask
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)[...]
mask = next(self._maskiter)[...]
return self._masked.from_unmasked(out, mask, copy=False)
next = __next__
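# Sketch of flat iteration: single elements come back as masked scalars
# (imports as in the earlier sketch):
#
# >>> ma = Masked(np.arange(4.).reshape(2, 2), mask=[[False, True], [False, False]])
# >>> ma.flat[1].mask
# array(True)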
class MaskedNDArray(Masked, np.ndarray, base_cls=np.ndarray, data_cls=np.ndarray):
_mask = None
info = MaskedNDArrayInfo()
def __new__(cls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
self = super().__new__(cls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# For all subclasses we should set a default __new__ that passes on
# arguments other than mask to the data class, and then sets the mask.
if '__new__' not in cls.__dict__:
def __new__(newcls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
# Need to explicitly mention classes outside of class definition.
self = super(cls, newcls).__new__(newcls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
cls.__new__ = __new__
if 'info' not in cls.__dict__ and hasattr(cls._data_cls, 'info'):
data_info = cls._data_cls.info
attr_names = data_info.attr_names | {'serialize_method'}
new_info = type(cls.__name__+'Info',
(MaskedArraySubclassInfo, data_info.__class__),
dict(attr_names=attr_names))
cls.info = new_info()
# The two pieces typically overridden.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
# Note: have to override since __new__ would use ndarray.__new__
# which expects the shape as its first argument, not an array.
data = np.array(data, subok=True, copy=copy)
self = data.view(cls)
self._set_mask(mask, copy=copy)
return self
@property
def unmasked(self):
return super().view(self._data_cls)
@classmethod
def _get_masked_cls(cls, data_cls):
# Short-cuts
if data_cls is np.ndarray:
return MaskedNDArray
elif data_cls is None: # for .view()
return cls
return super()._get_masked_cls(data_cls)
@property
def flat(self):
"""A 1-D iterator over the Masked array.
This returns a ``MaskedIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to Python's built-in iterator, except that it also
allows assignment.
"""
return MaskedIterator(self)
@property
def _baseclass(self):
"""Work-around for MaskedArray initialization.
Allows the base class to be inferred correctly when a masked instance
is used to initialize (or viewed as) a `~numpy.ma.MaskedArray`.
"""
return self._data_cls
def view(self, dtype=None, type=None):
"""New view of the masked array.
Like `numpy.ndarray.view`, but always returning a masked array subclass.
"""
if type is None and (isinstance(dtype, builtins.type)
and issubclass(dtype, np.ndarray)):
return super().view(self._get_masked_cls(dtype))
if dtype is None:
return super().view(self._get_masked_cls(type))
dtype = np.dtype(dtype)
if not (dtype.itemsize == self.dtype.itemsize
and (dtype.names is None
or len(dtype.names) == len(self.dtype.names))):
raise NotImplementedError(
f"{self.__class__} cannot be viewed with a dtype with a "
f"with a different number of fields or size.")
return super().view(dtype, self._get_masked_cls(type))
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Logically, this should come from ndarray and hence be None, but
# just in case someone creates a new mixin, we check.
super_array_finalize = super().__array_finalize__
if super_array_finalize: # pragma: no cover
super_array_finalize(obj)
if self._mask is None:
# Got here after, e.g., a view of another masked class.
# Get its mask, or initialize ours.
self._set_mask(getattr(obj, '_mask', False))
if 'info' in obj.__dict__:
self.info = obj.info
@property
def shape(self):
"""The shape of the data and the mask.
Usually used to get the current shape of an array, but may also be
used to reshape the array in-place by assigning a tuple of array
dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the
size of the array and the remaining dimensions.
Raises
------
AttributeError
If a copy is required, of either the data or the mask.
"""
# Redefinition to allow defining a setter and add a docstring.
return super().shape
@shape.setter
def shape(self, shape):
old_shape = self.shape
self._mask.shape = shape
# Reshape array proper in try/except just in case some broadcasting
# or so causes it to fail.
try:
super(MaskedNDArray, type(self)).shape.__set__(self, shape)
except Exception as exc:
self._mask.shape = old_shape
# Given that the mask reshaping succeeded, the only logical
# reason for an exception is something like a broadcast error in
# __array_finalize__, or a different memory ordering between
# mask and data. For those, give a more useful error message;
# otherwise just raise the error.
if 'could not broadcast' in exc.args[0]:
raise AttributeError(
'Incompatible shape for in-place modification. '
'Use `.reshape()` to make a copy with the desired '
'shape.') from None
else: # pragma: no cover
raise
_eq_simple = _comparison_method('__eq__')
_ne_simple = _comparison_method('__ne__')
__lt__ = _comparison_method('__lt__')
__le__ = _comparison_method('__le__')
__gt__ = _comparison_method('__gt__')
__ge__ = _comparison_method('__ge__')
def __eq__(self, other):
if not self.dtype.names:
return self._eq_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack([self[field] == other[field]
for field in self.dtype.names], axis=-1)
return result.all(axis=-1)
def __ne__(self, other):
if not self.dtype.names:
return self._ne_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack([self[field] != other[field]
for field in self.dtype.names], axis=-1)
return result.any(axis=-1)
def _combine_masks(self, masks, out=None):
masks = [m for m in masks if m is not None and m is not False]
if not masks:
return False
if len(masks) == 1:
if out is None:
return masks[0].copy()
else:
np.copyto(out, masks[0])
return out
out = np.logical_or(masks[0], masks[1], out=out)
for mask in masks[2:]:
np.logical_or(out, mask, out=out)
return out
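# Sketch of the combination rule: None and False mean "no mask", and any
# actual masks are OR-ed together (imports as in the earlier sketch):
#
# >>> m = Masked(np.zeros(2))  # any instance; the method needs no state.
# >>> m._combine_masks([None, np.array([True, False]), np.array([False, False])])
# array([ True, False])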
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.pop('out', None)
out_unmasked = None
out_mask = None
if out is not None:
out_unmasked, out_masks = self._get_data_and_masks(*out)
for d, m in zip(out_unmasked, out_masks):
if m is None:
# TODO: allow writing to unmasked output if nothing is masked?
if d is not None:
raise TypeError('cannot write to unmasked output')
elif out_mask is None:
out_mask = m
unmasked, masks = self._get_data_and_masks(*inputs)
if ufunc.signature:
# We're dealing with a gufunc. For now, only deal with
# np.matmul and gufuncs for which the mask of any output always
# depends on all core dimension values of all inputs.
# Also ignore axes keyword for now...
# TODO: in principle, it should be possible to generate the mask
# purely based on the signature.
if 'axes' in kwargs:
raise NotImplementedError("Masked does not yet support gufunc "
"calls with 'axes'.")
if ufunc is np.matmul:
# np.matmul is tricky and its signature cannot be parsed by
# _parse_gufunc_signature.
unmasked = np.atleast_1d(*unmasked)
mask0, mask1 = masks
masks = []
is_mat1 = unmasked[1].ndim >= 2
if mask0 is not None:
masks.append(
np.logical_or.reduce(mask0, axis=-1, keepdims=is_mat1))
if mask1 is not None:
masks.append(
np.logical_or.reduce(mask1, axis=-2, keepdims=True)
if is_mat1 else
np.logical_or.reduce(mask1))
mask = self._combine_masks(masks, out=out_mask)
else:
# Parse signature with private numpy function. Note it
# cannot handle spaces in tuples, so remove those.
in_sig, out_sig = np.lib.function_base._parse_gufunc_signature(
ufunc.signature.replace(' ', ''))
axis = kwargs.get('axis', -1)
keepdims = kwargs.get('keepdims', False)
in_masks = []
for sig, mask in zip(in_sig, masks):
if mask is not None:
if sig:
# Input has core dimensions. Assume that if any
# value in those is masked, the output will be
# masked too (TODO: for multiple core dimensions
# this may be too strong).
mask = np.logical_or.reduce(
mask, axis=axis, keepdims=keepdims)
in_masks.append(mask)
mask = self._combine_masks(in_masks)
result_masks = []
for os in out_sig:
if os:
# Output has core dimensions. Assume all those
# get the same mask.
result_mask = np.expand_dims(mask, axis)
else:
result_mask = mask
result_masks.append(result_mask)
mask = result_masks if len(result_masks) > 1 else result_masks[0]
elif method == '__call__':
# Regular ufunc call.
mask = self._combine_masks(masks, out=out_mask)
elif method == 'outer':
# Must have two arguments; adjust masks as will be done for data.
assert len(masks) == 2
masks = [(m if m is not None else False) for m in masks]
mask = np.logical_or.outer(masks[0], masks[1], out=out_mask)
elif method in {'reduce', 'accumulate'}:
# Reductions like np.add.reduce (sum).
if masks[0] is not None:
# By default, we simply propagate masks, since for
# things like np.sum, it makes no sense to do otherwise.
# Individual methods need to override as needed.
# TODO: take care of 'out' too?
if method == 'reduce':
axis = kwargs.get('axis', None)
keepdims = kwargs.get('keepdims', False)
where = kwargs.get('where', True)
mask = np.logical_or.reduce(masks[0], where=where,
axis=axis, keepdims=keepdims,
out=out_mask)
if where is not True:
# Mask also whole rows that were not selected by where,
# so would have been left as unmasked above.
mask |= np.logical_and.reduce(masks[0], where=where,
axis=axis, keepdims=keepdims)
else:
# Accumulate
axis = kwargs.get('axis', 0)
mask = np.logical_or.accumulate(masks[0], axis=axis,
out=out_mask)
elif out is not None:
mask = False
else: # pragma: no cover
# Can only get here if neither input nor output was masked, but
# perhaps axis or where was masked (in numpy < 1.21 this is
# possible). We don't support this.
return NotImplemented
elif method in {'reduceat', 'at'}: # pragma: no cover
# TODO: implement things like np.add.reduceat and np.add.at.
raise NotImplementedError("masked instances cannot yet deal with "
"'reduceat' or 'at'.")
if out_unmasked is not None:
kwargs['out'] = out_unmasked
result = getattr(ufunc, method)(*unmasked, **kwargs)
if result is None: # pragma: no cover
# This happens for the "at" method.
return result
if out is not None and len(out) == 1:
out = out[0]
return self._masked_result(result, mask, out)
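# Sketch of a plain ufunc call: the data go through the ufunc while the
# mask is simply propagated (imports as in the earlier sketch):
#
# >>> ma = Masked(np.array([1., 2.]), mask=[True, False])
# >>> res = np.add(ma, 1.)
# >>> res.unmasked, res.mask
# (array([2., 3.]), array([ True, False]))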
def __array_function__(self, function, types, args, kwargs):
# TODO: go through functions systematically to see which ones
# work and/or can be supported.
if function in MASKED_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in APPLY_TO_BOTH_FUNCTIONS:
helper = APPLY_TO_BOTH_FUNCTIONS[function]
try:
helper_result = helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
data_args, mask_args, kwargs, out = helper_result
if out is not None:
if not isinstance(out, Masked):
return self._not_implemented_or_raise(function, types)
function(*mask_args, out=out.mask, **kwargs)
function(*data_args, out=out.unmasked, **kwargs)
return out
mask = function(*mask_args, **kwargs)
result = function(*data_args, **kwargs)
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
dispatched_result = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
if not isinstance(dispatched_result, tuple):
return dispatched_result
result, mask, out = dispatched_result
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else: # pragma: no cover
# By default, just pass it through for now.
return super().__array_function__(function, types, args, kwargs)
if mask is None:
return result
else:
return self._masked_result(result, mask, out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Masked. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Masked subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Masked)
for t in types):
raise TypeError("the MaskedNDArray implementation cannot handle {} "
"with the given arguments."
.format(function)) from None
else:
return NotImplemented
def _masked_result(self, result, mask, out):
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
if not isinstance(mask, (list, tuple)):
mask = (mask,) * len(result)
return tuple(self._masked_result(result_, mask_, out_)
for (result_, mask_, out_) in zip(result, mask, out))
if out is None:
# Note that we cannot count on result being the same class as
# 'self' (e.g., comparison of quantity results in an ndarray, most
# operations on Longitude and Latitude result in Angle or
# Quantity), so use Masked to determine the appropriate class.
return Masked(result, mask)
# TODO: remove this sanity check once test cases are more complete.
assert isinstance(out, Masked)
# If we have an output, the result was written in-place, so we should
# also write the mask in-place (if not done already in the code).
if out._mask is not mask:
out._mask[...] = mask
return out
# Below are ndarray methods that need to be overridden as masked elements
# need to be skipped and/or an initial value needs to be set.
def _reduce_defaults(self, kwargs, initial_func=None):
"""Get default where and initial for masked reductions.
Generally, the default should be to skip all masked elements. For
reductions such as np.minimum.reduce, we also need an initial value,
which can be determined using ``initial_func``.
"""
if 'where' not in kwargs:
kwargs['where'] = ~self.mask
if initial_func is not None and 'initial' not in kwargs:
kwargs['initial'] = initial_func(self.unmasked)
return kwargs
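# Sketch of the effect: masked elements are skipped in reductions
# (imports as in the earlier sketch):
#
# >>> float(Masked(np.array([5., 1.]), mask=[False, True]).min())
# 5.0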
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
# Unfortunately, we cannot override the call to diagonal inside trace, so
# we duplicate the implementation from numpy/core/src/multiarray/calculation.c.
diagonal = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return diagonal.sum(-1, dtype=dtype, out=out)
def min(self, axis=None, out=None, **kwargs):
return super().min(axis=axis, out=out,
**self._reduce_defaults(kwargs, np.nanmax))
def max(self, axis=None, out=None, **kwargs):
return super().max(axis=axis, out=out,
**self._reduce_defaults(kwargs, np.nanmin))
def nonzero(self):
unmasked_nonzero = self.unmasked.nonzero()
if self.ndim >= 1:
not_masked = ~self.mask[unmasked_nonzero]
return tuple(u[not_masked] for u in unmasked_nonzero)
else:
return unmasked_nonzero if not self.mask else np.nonzero(0)
def compress(self, condition, axis=None, out=None):
if out is not None:
raise NotImplementedError('cannot yet give output')
return self._apply('compress', condition, axis=axis)
def repeat(self, repeats, axis=None):
return self._apply('repeat', repeats, axis=axis)
def choose(self, choices, out=None, mode='raise'):
# Let __array_function__ take care since choices can be masked too.
return np.choose(self, choices, out=out, mode=mode)
if NUMPY_LT_1_22:
def argmin(self, axis=None, out=None):
# TODO: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out)
def argmax(self, axis=None, out=None):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out)
else:
def argmin(self, axis=None, out=None, *, keepdims=False):
# TODO: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argmax(self, axis=None, out=None, *, keepdims=False):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
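# Sketch: argmin/argmax likewise skip masked elements (imports as in the
# earlier sketch):
#
# >>> Masked(np.array([3., 1., 2.]), mask=[False, True, False]).argmin()
# 2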
def argsort(self, axis=-1, kind=None, order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis on both the array
and the mask, with masked items being sorted to the end.
Parameters
----------
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis).
If None, the flattened array is used.
kind : str or None, ignored.
The kind of sort. Present only to allow subclasses to work.
order : str or list of str, optional
For an array with fields defined, the fields to compare first,
second, etc. A single field can be specified as a string, and not
all fields need be specified, but unspecified fields will still be
used, in dtype order, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sorts along the specified ``axis``. Use
``np.take_along_axis(self, index_array, axis=axis)`` to obtain
the sorted array.
"""
if axis is None:
data = self.ravel()
axis = -1
else:
data = self
if self.dtype.names:
# As done inside the argsort implementation in multiarray/methods.c.
if order is None:
order = self.dtype.names
else:
order = np.core._internal._newnames(self.dtype, order)
keys = tuple(data[name] for name in order[::-1])
elif order is not None:
raise ValueError('Cannot specify order when the array has no fields.')
else:
keys = (data,)
return np.lexsort(keys, axis=axis)
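# Sketch of the sorting convention: masked items go to the end (imports
# as in the earlier sketch):
#
# >>> Masked(np.array([2., 1., 3.]), mask=[False, True, False]).argsort()
# array([0, 2, 1])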
def sort(self, axis=-1, kind=None, order=None):
"""Sort an array in-place. Refer to `numpy.sort` for full documentation."""
# TODO: probably possible to do this faster than going through argsort!
indices = self.argsort(axis, kind=kind, order=order)
self[:] = np.take_along_axis(self, indices, axis=axis)
def argpartition(self, kth, axis=-1, kind='introselect', order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.argsort(axis=axis, order=order)
def partition(self, kth, axis=-1, kind='introselect', order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.sort(axis=axis, order=order)
def cumsum(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.add.accumulate(self, axis=axis, dtype=dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.multiply.accumulate(self, axis=axis, dtype=dtype, out=out)
def clip(self, min=None, max=None, out=None, **kwargs):
"""Return an array whose values are limited to ``[min, max]``.
Like `~numpy.clip`, but any masked values in ``min`` and ``max``
are ignored for clipping. The mask of the input array is propagated.
"""
# TODO: implement this at the ufunc level.
dmin, mmin = self._get_data_and_mask(min)
dmax, mmax = self._get_data_and_mask(max)
if mmin is None and mmax is None:
# Fast path for unmasked max, min.
return super().clip(min, max, out=out, **kwargs)
masked_out = np.positive(self, out=out)
out = masked_out.unmasked
if dmin is not None:
np.maximum(out, dmin, out=out, where=True if mmin is None else ~mmin)
if dmax is not None:
np.minimum(out, dmax, out=out, where=True if mmax is None else ~mmax)
return masked_out
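# Sketch of clipping with unmasked bounds: values are limited while the
# input mask is propagated (imports as in the earlier sketch):
#
# >>> res = Masked(np.array([1., 5.]), mask=[False, True]).clip(0., 2.)
# >>> res.unmasked, res.mask
# (array([1., 2.]), array([False,  True]))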
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Implementation based on that in numpy/core/_methods.py
# Cast bool, unsigned int, and int to float64 by default,
# and do float16 at higher precision.
is_float16_result = False
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype('f8')
elif issubclass(self.dtype.type, np.float16):
dtype = np.dtype('f4')
is_float16_result = out is None
where = ~self.mask & where
result = self.sum(axis=axis, dtype=dtype, out=out,
keepdims=keepdims, where=where)
n = np.add.reduce(where, axis=axis, keepdims=keepdims)
result /= n
if is_float16_result:
result = result.astype(self.dtype)
return result
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):
where_final = ~self.mask & where
# Simplified implementation based on that in numpy/core/_methods.py
n = np.add.reduce(where_final, axis=axis, keepdims=keepdims)[...]
# Cast bool, unsigned int, and int to float64 by default.
if dtype is None and issubclass(self.dtype.type,
(np.integer, np.bool_)):
dtype = np.dtype('f8')
mean = self.mean(axis=axis, dtype=dtype, keepdims=True, where=where)
x = self - mean
x *= x.conjugate() # Conjugate just returns x if not complex.
result = x.sum(axis=axis, dtype=dtype, out=out,
keepdims=keepdims, where=where_final)
n -= ddof
n = np.maximum(n, 0, out=n)
result /= n
result._mask |= (n == 0)
return result
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):
result = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims, where=where)
return np.sqrt(result, out=result)
def __bool__(self):
# First get result from array itself; this will error if not a scalar.
result = super().__bool__()
return result and not self.mask
def any(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_or.reduce(self, axis=axis, out=out,
keepdims=keepdims, where=~self.mask & where)
def all(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_and.reduce(self, axis=axis, out=out,
keepdims=keepdims, where=~self.mask & where)
# Following overrides needed since somehow the ndarray implementation
# does not actually call these.
def __str__(self):
return np.array_str(self)
def __repr__(self):
return np.array_repr(self)
def __format__(self, format_spec):
string = super().__format__(format_spec)
if self.shape == () and self.mask:
n = min(3, max(1, len(string)))
return ' ' * (len(string)-n) + '\u2014' * n
else:
return string
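# Sketch of scalar formatting: a masked scalar is typeset as em-dashes of
# the same width (imports as in the earlier sketch):
#
# >>> format(Masked(1.2345, mask=True), '.2f')
# ' ———'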
class MaskedRecarray(np.recarray, MaskedNDArray, data_cls=np.recarray):
# Explicit definition since we need to override some methods.
def __array_finalize__(self, obj):
# recarray.__array_finalize__ does not do super, so we do it
# explicitly.
super().__array_finalize__(obj)
super(np.recarray, self).__array_finalize__(obj)
# __getattribute__, __setattr__, and field use these somewhat
# obscure ndarray methods. TODO: override in MaskedNDArray?
def getfield(self, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
return self[field]
raise NotImplementedError('can only get existing field from '
'structured dtype.')
def setfield(self, val, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
self[field] = val
return
raise NotImplementedError('can only set existing field from '
'structured dtype.')
|
43016585b39c175366044bee48452701a4fa4191d5b2259b566b5f8572e046a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helpers for letting numpy functions interact with Masked arrays.
The module supplies helper routines for numpy functions that propagate
masks appropriately, for use in the ``__array_function__``
implementation of `~astropy.utils.masked.MaskedNDArray`. They are not
very useful on their own, but the ones with docstrings are included in
the documentation so that there is a place to find out how the mask is
interpreted.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import (
FunctionAssigner)
from astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_20, NUMPY_LT_1_23
# This module should not really be imported, but we define __all__
# such that sphinx can typeset the functions with docstrings.
# The latter are added to __all__ at the end.
__all__ = ['MASKED_SAFE_FUNCTIONS', 'APPLY_TO_BOTH_FUNCTIONS',
'DISPATCHED_FUNCTIONS', 'UNSUPPORTED_FUNCTIONS']
MASKED_SAFE_FUNCTIONS = set()
"""Set of functions that work fine on Masked classes already.
Most of these internally use `numpy.ufunc` or other functions that
are already covered.
"""
APPLY_TO_BOTH_FUNCTIONS = {}
"""Dict of functions that should apply to both data and mask.
The `dict` is keyed by the numpy function and the values are functions
that take the input arguments of the numpy function and organize these
for passing the data and mask to the numpy function.
Returns
-------
data_args : tuple
Arguments to pass on to the numpy function for the unmasked data.
mask_args : tuple
Arguments to pass on to the numpy function for the masked data.
kwargs : dict
Keyword arguments to pass on for both unmasked data and mask.
out : `~astropy.utils.masked.Masked` instance or None
Optional instance in which to store the output.
Raises
------
NotImplementedError
When an argument is masked when it should not be, or vice versa.
"""
DISPATCHED_FUNCTIONS = {}
"""Dict of functions that provide the numpy function's functionality.
These are for more complicated versions where the numpy function itself
cannot easily be used. It should return either the result of the
function, or a tuple consisting of the unmasked result, the mask for the
result and a possible output instance.
It should raise `NotImplementedError` if one of the arguments is masked
when it should not be, or vice versa.
"""
UNSUPPORTED_FUNCTIONS = set()
"""Set of numpy functions that are not supported for masked arrays.
For most, masked input simply makes no sense, but for others support may
simply not have been implemented yet for lack of time. Issues or PRs for
support for functions are welcome.
"""
# Almost all from np.core.fromnumeric defer to methods so are OK.
MASKED_SAFE_FUNCTIONS |= {
getattr(np, name) for name in np.core.fromnumeric.__all__
if name not in {'choose', 'put', 'resize', 'searchsorted', 'where', 'alen'}}
MASKED_SAFE_FUNCTIONS |= {
# built-in from multiarray
np.may_share_memory, np.can_cast, np.min_scalar_type, np.result_type,
np.shares_memory,
# np.core.arrayprint
np.array_repr,
# np.core.function_base
np.linspace, np.logspace, np.geomspace,
# np.core.numeric
np.isclose, np.allclose, np.flatnonzero, np.argwhere,
# np.core.shape_base
np.atleast_1d, np.atleast_2d, np.atleast_3d, np.stack, np.hstack, np.vstack,
# np.lib.function_base
np.average, np.diff, np.extract, np.meshgrid, np.trapz, np.gradient,
# np.lib.index_tricks
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.fill_diagonal,
# np.lib.shape_base
np.column_stack, np.row_stack, np.dstack,
np.array_split, np.split, np.hsplit, np.vsplit, np.dsplit,
np.expand_dims, np.apply_along_axis, np.kron, np.tile,
np.take_along_axis, np.put_along_axis,
# np.lib.type_check (all but asfarray, nan_to_num)
np.iscomplexobj, np.isrealobj, np.imag, np.isreal,
np.real, np.real_if_close, np.common_type,
# np.lib.ufunclike
np.fix, np.isneginf, np.isposinf,
# np.lib.function_base
np.angle, np.i0,
}
IGNORED_FUNCTIONS = {
# I/O - useless for Masked, since no way to store the mask.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander}
if NUMPY_LT_1_20:
# financial
IGNORED_FUNCTIONS |= {np.fv, np.ipmt, np.irr, np.mirr, np.nper,
np.npv, np.pmt, np.ppmt, np.pv, np.rate}
# TODO: some of the following could in principle be supported.
IGNORED_FUNCTIONS |= {
np.pad,
np.searchsorted, np.digitize,
np.is_busday, np.busday_count, np.busday_offset,
# numpy.lib.function_base
np.cov, np.corrcoef, np.trim_zeros,
# numpy.core.numeric
np.correlate, np.convolve,
# numpy.lib.histograms
np.histogram, np.histogram2d, np.histogramdd, np.histogram_bin_edges,
# TODO!!
np.dot, np.vdot, np.inner, np.tensordot, np.cross,
np.einsum, np.einsum_path,
}
# Really should do these...
IGNORED_FUNCTIONS |= {getattr(np, setopsname) for setopsname in np.lib.arraysetops.__all__}
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
# Explicitly unsupported functions
UNSUPPORTED_FUNCTIONS |= {
np.unravel_index, np.ravel_multi_index, np.ix_,
}
# No support for the functions also not supported by Quantity
# (io, polynomial, etc.).
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
apply_to_both = FunctionAssigner(APPLY_TO_BOTH_FUNCTIONS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
def _get_data_and_masks(*args):
"""Separate out arguments into tuples of data and masks.
An all-False mask is created if an argument does not have a mask.
"""
from .core import Masked
data, masks = Masked._get_data_and_masks(*args)
masks = tuple(m if m is not None else np.zeros(np.shape(d), bool)
for d, m in zip(data, masks))
return data, masks
# Following are simple ufunc-like functions which should just copy the mask.
@dispatched_function
def datetime_as_string(arr, *args, **kwargs):
return (np.datetime_as_string(arr.unmasked, *args, **kwargs),
arr.mask.copy(), None)
@dispatched_function
def sinc(x):
return np.sinc(x.unmasked), x.mask.copy(), None
@dispatched_function
def iscomplex(x):
return np.iscomplex(x.unmasked), x.mask.copy(), None
@dispatched_function
def unwrap(p, *args, **kwargs):
return np.unwrap(p.unmasked, *args, **kwargs), p.mask.copy(), None
@dispatched_function
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
data = np.nan_to_num(x.unmasked, copy=copy,
nan=nan, posinf=posinf, neginf=neginf)
return (data, x.mask.copy(), None) if copy else x
# Following are simple functions related to shapes, where the same function
# should be applied to the data and the mask. They cannot all share the
# same helper, because the first arguments have different names.
@apply_to_both(helps={
np.copy, np.asfarray, np.resize, np.moveaxis, np.rollaxis, np.roll})
def masked_a_helper(a, *args, **kwargs):
data, mask = _get_data_and_masks(a)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.flip, np.flipud, np.fliplr, np.rot90, np.triu, np.tril})
def masked_m_helper(m, *args, **kwargs):
data, mask = _get_data_and_masks(m)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.diag, np.diagflat})
def masked_v_helper(v, *args, **kwargs):
data, mask = _get_data_and_masks(v)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.delete})
def masked_arr_helper(array, *args, **kwargs):
data, mask = _get_data_and_masks(array)
return data + args, mask + args, kwargs, None
@apply_to_both
def broadcast_to(array, shape, subok=False):
"""Broadcast array to the given shape.
Like `numpy.broadcast_to`, and applied to both unmasked data and mask.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and mask are allowed, i.e., for ``subok=False``,
a `~astropy.utils.masked.MaskedNDArray` will be returned.
"""
data, mask = _get_data_and_masks(array)
return data, mask, dict(shape=shape, subok=subok), None
@dispatched_function
def outer(a, b, out=None):
return np.multiply.outer(np.ravel(a), np.ravel(b), out=out)
@dispatched_function
def empty_like(prototype, dtype=None, order='K', subok=True, shape=None):
"""Return a new array with the same shape and type as a given array.
Like `numpy.empty_like`, but will add an empty mask.
"""
unmasked = np.empty_like(prototype.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
if dtype is not None:
dtype = (np.ma.make_mask_descr(unmasked.dtype)
if unmasked.dtype.names else np.dtype('?'))
mask = np.empty_like(prototype.mask, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, mask, None
@dispatched_function
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""Return an array of zeros with the same shape and type as a given array.
Like `numpy.zeros_like`, but will add an all-false mask.
"""
unmasked = np.zeros_like(a.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, False, None
@dispatched_function
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""Return an array of ones with the same shape and type as a given array.
Like `numpy.ones_like`, but will add an all-false mask.
"""
unmasked = np.ones_like(a.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, False, None
@dispatched_function
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""Return a full array with the same shape and type as a given array.
Like `numpy.full_like`, but with a mask that is also set.
If ``fill_value`` is `numpy.ma.masked`, the data will be left unset
(i.e., as created by `numpy.empty_like`).
"""
result = np.empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
result[...] = fill_value
return result
@dispatched_function
def put(a, ind, v, mode='raise'):
"""Replaces specified elements of an array with given values.
Like `numpy.put`, but for masked array ``a`` and possibly masked
value ``v``. Masked indices ``ind`` are not supported.
"""
from astropy.utils.masked import Masked
if isinstance(ind, Masked) or not isinstance(a, Masked):
raise NotImplementedError
v_data, v_mask = a._get_data_and_mask(v)
if v_data is not None:
np.put(a.unmasked, ind, v_data, mode=mode)
# v_mask of None will be correctly interpreted as False.
np.put(a.mask, ind, v_mask, mode=mode)
return None
@dispatched_function
def putmask(a, mask, values):
"""Changes elements of an array based on conditional and input values.
Like `numpy.putmask`, but for masked array ``a`` and possibly masked
``values``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(a, Masked):
raise NotImplementedError
values_data, values_mask = a._get_data_and_mask(values)
if values_data is not None:
np.putmask(a.unmasked, mask, values_data)
np.putmask(a.mask, mask, values_mask)
return None
@dispatched_function
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
Like `numpy.place`, but for masked array ``arr`` and possibly masked
``vals``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
vals_data, vals_mask = arr._get_data_and_mask(vals)
if vals_data is not None:
np.place(arr.unmasked, mask, vals_data)
np.place(arr.mask, mask, vals_mask)
return None
@dispatched_function
def copyto(dst, src, casting='same_kind', where=True):
"""Copies values from one array to another, broadcasting as necessary.
Like `numpy.copyto`, but for masked destination ``dst`` and possibly
masked source ``src``.
"""
from astropy.utils.masked import Masked
if not isinstance(dst, Masked) or isinstance(where, Masked):
raise NotImplementedError
src_data, src_mask = dst._get_data_and_mask(src)
if src_data is not None:
np.copyto(dst.unmasked, src_data, casting=casting, where=where)
if src_mask is not None:
np.copyto(dst.mask, src_mask, where=where)
return None
@dispatched_function
def packbits(a, *args, **kwargs):
result = np.packbits(a.unmasked, *args, **kwargs)
mask = np.packbits(a.mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def unpackbits(a, *args, **kwargs):
result = np.unpackbits(a.unmasked, *args, **kwargs)
mask = np.zeros(a.shape, dtype='u1')
mask[a.mask] = 255
mask = np.unpackbits(mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def bincount(x, weights=None, minlength=0):
"""Count number of occurrences of each value in array of non-negative ints.
Like `numpy.bincount`, but masked entries in ``x`` will be skipped.
Any masked entries in ``weights`` will lead the corresponding bin to
be masked.
"""
from astropy.utils.masked import Masked
if weights is not None:
weights = np.asanyarray(weights)
if isinstance(x, Masked) and x.ndim <= 1:
# let other dimensions lead to errors.
if weights is not None and weights.ndim == x.ndim:
weights = weights[~x.mask]
x = x.unmasked[~x.mask]
mask = None
if weights is not None:
weights, w_mask = Masked._get_data_and_mask(weights)
if w_mask is not None:
mask = np.bincount(x, w_mask.astype(int),
minlength=minlength).astype(bool)
result = np.bincount(x, weights, minlength=minlength)
return result, mask, None
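# Sketch (illustrative values; assumes numpy and the public Masked class
# are importable):
#
# >>> import numpy as np
# >>> from astropy.utils.masked import Masked
# >>> np.bincount(Masked(np.array([0, 1, 1]), mask=[True, False, False]))
# array([0, 2])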
@dispatched_function
def msort(a):
result = a.copy()
result.sort(axis=0)
return result
@dispatched_function
def sort_complex(a):
# Just a copy of function_base.sort_complex, to avoid the asarray.
b = a.copy()
b.sort()
if not issubclass(b.dtype.type, np.complexfloating): # pragma: no cover
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
if NUMPY_LT_1_20:
@apply_to_both
def concatenate(arrays, axis=0, out=None):
data, masks = _get_data_and_masks(*arrays)
return (data,), (masks,), dict(axis=axis), out
else:
@dispatched_function
def concatenate(arrays, axis=0, out=None, dtype=None, casting='same_kind'):
data, masks = _get_data_and_masks(*arrays)
if out is None:
return (np.concatenate(data, axis=axis, dtype=dtype, casting=casting),
np.concatenate(masks, axis=axis),
None)
else:
from astropy.utils.masked import Masked
if not isinstance(out, Masked):
raise NotImplementedError
np.concatenate(masks, out=out.mask, axis=axis)
np.concatenate(data, out=out.unmasked, axis=axis, dtype=dtype, casting=casting)
return out
@apply_to_both
def append(arr, values, axis=None):
data, masks = _get_data_and_masks(arr, values)
return data, masks, dict(axis=axis), None
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
from astropy.utils.masked import Masked
(arrays, list_ndim, result_ndim,
final_size) = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = Masked(np.empty(shape=shape, dtype=dtype, order=order))
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
@dispatched_function
def broadcast_arrays(*args, subok=True):
"""Broadcast arrays to a common shape.
Like `numpy.broadcast_arrays`, applied to both unmasked data and masks.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and masks are allowed, i.e., for ``subok=False``,
`~astropy.utils.masked.MaskedNDArray` instances will be returned.
"""
from .core import Masked
are_masked = [isinstance(arg, Masked) for arg in args]
data = [(arg.unmasked if is_masked else arg)
for arg, is_masked in zip(args, are_masked)]
results = np.broadcast_arrays(*data, subok=subok)
shape = results[0].shape if isinstance(results, list) else results.shape
masks = [(np.broadcast_to(arg.mask, shape, subok=subok)
if is_masked else None)
for arg, is_masked in zip(args, are_masked)]
results = [(Masked(result, mask) if mask is not None else result)
for (result, mask) in zip(results, masks)]
return results if len(results) > 1 else results[0]
@apply_to_both
def insert(arr, obj, values, axis=None):
"""Insert values along the given axis before the given indices.
Like `numpy.insert` but for possibly masked ``arr`` and ``values``.
Masked ``obj`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(obj, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
(arr_data, val_data), (arr_mask, val_mask) = _get_data_and_masks(arr, values)
return ((arr_data, obj, val_data, axis),
(arr_mask, obj, val_mask, axis), {}, None)
if NUMPY_LT_1_19:
@dispatched_function
def count_nonzero(a, axis=None):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis)
else:
@dispatched_function
def count_nonzero(a, axis=None, *, keepdims=False):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis, keepdims=keepdims)
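# Sketch for either branch above: masked values count as zero
# (imports as in the bincount sketch):
#
# >>> np.count_nonzero(Masked(np.array([1, 0, 3]), mask=[True, False, False]))
# 1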
if NUMPY_LT_1_19:
def _zeros_like(a, dtype=None, order='K', subok=True, shape=None):
if shape != ():
return np.zeros_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
else:
return np.zeros_like(a, dtype=dtype, order=order, subok=subok,
shape=(1,))[0]
else:
_zeros_like = np.zeros_like
def _masked_median_1d(a, overwrite_input):
# TODO: need an in-place mask-sorting option.
unmasked = a.unmasked[~a.mask]
if unmasked.size:
return a.from_unmasked(
np.median(unmasked, overwrite_input=overwrite_input))
else:
return a.from_unmasked(_zeros_like(a.unmasked, shape=(1,))[0], mask=True)
def _masked_median(a, axis=None, out=None, overwrite_input=False):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_median_1d(part, overwrite_input)
else:
result = np.apply_along_axis(_masked_median_1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
@dispatched_function
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
from astropy.utils.masked import Masked
if out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
r, k = np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out,
overwrite_input=overwrite_input)
return (r.reshape(k) if keepdims else r) if out is None else out
def _masked_quantile_1d(a, q, **kwargs):
"""
Private function for rank-1 arrays. Compute quantiles, ignoring any
masked values. See `numpy.nanpercentile` for parameter usage.
"""
unmasked = a.unmasked[~a.mask]
if unmasked.size:
result = np.lib.function_base._quantile_unchecked(unmasked, q, **kwargs)
return a.from_unmasked(result)
else:
return a.from_unmasked(_zeros_like(a.unmasked, shape=q.shape), True)
def _masked_quantile(a, q, axis=None, out=None, **kwargs):
# As for np.nanquantile, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_quantile_1d(part, q, **kwargs)
else:
result = np.apply_along_axis(_masked_quantile_1d, axis, a, q, **kwargs)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
return result
@dispatched_function
def quantile(a, q, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if isinstance(q, Masked) or out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
q = np.asanyarray(q)
if not np.lib.function_base._quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
keepdims = kwargs.pop('keepdims', False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs)
return (r.reshape(k) if keepdims else r) if out is None else out
@dispatched_function
def percentile(a, q, *args, **kwargs):
q = np.true_divide(q, 100)
return quantile(a, q, *args, **kwargs)
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
(a1d, a2d), (a1m, a2m) = _get_data_and_masks(a1, a2)
if a1d.shape != a2d.shape:
return False
equal = (a1d == a2d)
if equal_nan:
equal |= np.isnan(a1d) & np.isnan(a2d)
return bool((equal | a1m | a2m).all())
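# Sketch: masked elements compare as equal, so only the unmasked values
# have to match (imports as in the bincount sketch):
#
# >>> np.array_equal(Masked(np.array([1, 2]), mask=[True, False]), np.array([9, 2]))
# True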
@dispatched_function
def array_equiv(a1, a2):
return bool((a1 == a2).all())
@dispatched_function
def where(condition, *args):
from astropy.utils.masked import Masked
if not args:
return condition.nonzero(), None, None
condition, c_mask = Masked._get_data_and_mask(condition)
data, masks = _get_data_and_masks(*args)
unmasked = np.where(condition, *data)
mask = np.where(condition, *masks)
if c_mask is not None:
mask |= c_mask
return Masked(unmasked, mask=mask)
@dispatched_function
def choose(a, choices, out=None, mode='raise'):
"""Construct an array from an index array and a set of arrays to choose from.
Like `numpy.choose`. Masked indices in ``a`` will lead to masked output
values and underlying data values are ignored if out of bounds (for
``mode='raise'``). Any values masked in ``choices`` will be propagated
if chosen.
"""
from astropy.utils.masked import Masked
a_data, a_mask = Masked._get_data_and_mask(a)
if a_mask is not None and mode == 'raise':
# Avoid raising on masked indices.
a_data = a.filled(fill_value=0)
kwargs = {'mode': mode}
if out is not None:
if not isinstance(out, Masked):
raise NotImplementedError
kwargs['out'] = out.unmasked
data, masks = _get_data_and_masks(*choices)
data_chosen = np.choose(a_data, data, **kwargs)
if out is not None:
kwargs['out'] = out.mask
mask_chosen = np.choose(a_data, masks, **kwargs)
if a_mask is not None:
mask_chosen |= a_mask
return Masked(data_chosen, mask_chosen) if out is None else out
@apply_to_both
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
Like `numpy.select`, but masks in ``choicelist`` are propagated.
Any masks in ``condlist`` are ignored.
"""
from astropy.utils.masked import Masked
condlist = [c.unmasked if isinstance(c, Masked) else c
for c in condlist]
data_list, mask_list = _get_data_and_masks(*choicelist)
default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)
return ((condlist, data_list, default.unmasked),
(condlist, mask_list, default.mask), {}, None)
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
"""Evaluate a piecewise-defined function.
Like `numpy.piecewise` but for masked input array ``x``.
Any masks in ``condlist`` are ignored.
"""
# Copied implementation from numpy.lib.function_base.piecewise,
# just to ensure output is Masked.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray))
and x.ndim != 0): # pragma: no cover
condlist = [condlist]
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
# The one real change...
y = np.zeros_like(x)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
for item, value in zip(where, what):
y[item] = value
return y
@dispatched_function
def interp(x, xp, fp, *args, **kwargs):
"""One-dimensional linear interpolation.
Like `numpy.interp`, but any masked points in ``xp`` and ``fp``
are ignored. Any masked values in ``x`` will still be evaluated,
but masked on output.
"""
from astropy.utils.masked import Masked
xd, xm = Masked._get_data_and_mask(x)
if isinstance(xp, Masked) or isinstance(fp, Masked):
(xp, fp), (xpm, fpm) = _get_data_and_masks(xp, fp)
if xp.ndim == fp.ndim == 1:
# Only deal with the 1-D case; anything else will just raise below.
m = xpm | fpm
xp = xp[~m]
fp = fp[~m]
result = np.interp(xd, xp, fp, *args, **kwargs)
return result if xm is None else Masked(result, xm.copy())
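# Sketch: the masked point (1., 99.) is dropped before interpolating
# (imports as in the bincount sketch):
#
# >>> xp = Masked(np.array([0., 1., 2.]), mask=[False, True, False])
# >>> np.interp(0.5, xp, np.array([0., 99., 2.]))
# 0.5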
@dispatched_function
def lexsort(keys, axis=-1):
"""Perform an indirect stable sort using a sequence of keys.
Like `numpy.lexsort` but for possibly masked ``keys``. Masked
values are sorted towards the end for each key.
"""
# Sort masks to the end.
from .core import Masked
new_keys = []
for key in keys:
if isinstance(key, Masked):
# If there are other keys below, want to be sure that
# for masked values, those other keys set the order.
new_key = key.unmasked
if new_keys and key.mask.any():
new_key = new_key.copy()
new_key[key.mask] = new_key.flat[0]
new_keys.extend([new_key, key.mask])
else:
new_keys.append(key)
return np.lexsort(new_keys, axis=axis)
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
# since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is Masked.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
class MaskedFormat:
"""Formatter for masked array scalars.
For use in `numpy.array2string`, wrapping the regular formatters such
that if a value is masked, its formatted string is replaced.
Typically initialized using the ``from_data`` class method.
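
    For example (an illustrative sketch), masked entries are replaced by
    em-dashes on output:

    >>> import numpy as np
    >>> from astropy.utils.masked import Masked
    >>> print(Masked(np.array([1., 2.]), mask=[True, False]))
    [—— 2.]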
"""
def __init__(self, format_function):
self.format_function = format_function
# Special case for structured void and subarray: we need to make all the
# format functions for the items masked as well.
        # TODO: maybe a separate class is more logical?
ffs = getattr(format_function, 'format_functions', None)
if ffs:
# StructuredVoidFormat: multiple format functions to be changed.
self.format_function.format_functions = [MaskedFormat(ff) for ff in ffs]
ff = getattr(format_function, 'format_function', None)
if ff:
# SubarrayFormat: change format function for the elements.
self.format_function.format_function = MaskedFormat(ff)
def __call__(self, x):
if x.dtype.names:
# The replacement of x with a list is needed because the function
# inside StructuredVoidFormat iterates over x, which works for an
# np.void but not an array scalar.
return self.format_function([x[field] for field in x.dtype.names])
if x.shape:
# For a subarray pass on the data directly, since the
# items will be iterated on inside the function.
return self.format_function(x)
# Single element: first just typeset it normally, replace with masked
# string if needed.
string = self.format_function(x.unmasked[()])
if x.mask:
# Strikethrough would be neat, but terminal needs a different
# formatting than, say, jupyter notebook.
# return "\x1B[9m"+string+"\x1B[29m"
# return ''.join(s+'\u0336' for s in string)
n = min(3, max(1, len(string)))
return ' ' * (len(string)-n) + '\u2014' * n
else:
return string
@classmethod
def from_data(cls, data, **options):
from numpy.core.arrayprint import _get_format_function
return cls(_get_format_function(data, **options))
def _array2string(a, options, separator=' ', prefix=""):
# Mostly copied from numpy.core.arrayprint, except:
# - The format function is wrapped in a mask-aware class;
    # - Array scalars are not cast as arrays.
from numpy.core.arrayprint import _leading_trailing, _formatArray
data = np.asarray(a)
if a.size > options['threshold']:
summary_insert = "..."
data = _leading_trailing(data, options['edgeitems'])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = MaskedFormat.from_data(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " "*len(prefix)
lst = _formatArray(a, format_function, options['linewidth'],
next_line_prefix, separator, options['edgeitems'],
summary_insert, options['legacy'])
return lst
@dispatched_function
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=""):
# Copied from numpy.core.arrayprint, but using _array2string above.
from numpy.core.arrayprint import _make_options_dict, _format_options
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
sign, formatter, floatmode)
options = _format_options.copy()
options.update(overrides)
options['linewidth'] -= len(suffix)
    # treat as a null array if any shape element == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
@dispatched_function
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
# Override to avoid special treatment of array scalars.
return array2string(a, max_line_width, precision, suppress_small, ' ', "")
# For the nanfunctions, we just treat any nan as an additional mask.
_nanfunc_fill_values = {'nansum': 0, 'nancumsum': 0,
'nanprod': 1, 'nancumprod': 1}
def masked_nanfunc(nanfuncname):
np_func = getattr(np, nanfuncname[3:])
fill_value = _nanfunc_fill_values.get(nanfuncname, None)
def nanfunc(a, *args, **kwargs):
from astropy.utils.masked import Masked
a, mask = Masked._get_data_and_mask(a)
if issubclass(a.dtype.type, np.inexact):
nans = np.isnan(a)
mask = nans if mask is None else (nans | mask)
if mask is not None:
a = Masked(a, mask)
if fill_value is not None:
a = a.filled(fill_value)
return np_func(a, *args, **kwargs)
doc = f"Like `numpy.{nanfuncname}`, skipping masked values as well.\n\n"
if fill_value is not None:
# sum, cumsum, prod, cumprod
doc += (f"Masked/NaN values are replaced with {fill_value}. "
"The output is not masked.")
elif "arg" in nanfuncname:
doc += ("No exceptions are raised for fully masked/NaN slices.\n"
"Instead, these give index 0.")
else:
doc += ("No warnings are given for fully masked/NaN slices.\n"
"Instead, they are masked in the output.")
nanfunc.__doc__ = doc
nanfunc.__name__ = nanfuncname
return nanfunc
for nanfuncname in np.lib.nanfunctions.__all__:
globals()[nanfuncname] = dispatched_function(masked_nanfunc(nanfuncname),
helps=getattr(np, nanfuncname))
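
# For example, ``np.nansum`` on a Masked array now skips masked entries as
# well as NaNs (an illustrative sketch; both are filled with 0 for the sum):
#
#     >>> np.nansum(Masked(np.array([1., np.nan, 3.]), mask=[False, False, True]))
#     1.0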
# Add any dispatched or helper function that has a docstring to
# __all__, so that they will be typeset by sphinx; the logic is that for
# those the handling of the mask is presumably not entirely obvious.
__all__ += sorted(helper.__name__ for helper in (
set(APPLY_TO_BOTH_FUNCTIONS.values())
| set(DISPATCHED_FUNCTIONS.values())) if helper.__doc__)
|
60771ddfa1a7f6ed29079feabde2af2d1754418422e216152aa341df0cd4d1b1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
import numpy as np
from astropy.utils import minversion
__all__ = ['NUMPY_LT_1_19', 'NUMPY_LT_1_19_1', 'NUMPY_LT_1_20',
'NUMPY_LT_1_21_1', 'NUMPY_LT_1_22', 'NUMPY_LT_1_22_1',
'NUMPY_LT_1_23', 'NUMPY_LT_1_24']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_19 = not minversion(np, '1.19')
NUMPY_LT_1_19_1 = not minversion(np, '1.19.1')
NUMPY_LT_1_20 = not minversion(np, '1.20')
NUMPY_LT_1_21_1 = not minversion(np, '1.21.1')
NUMPY_LT_1_22 = not minversion(np, '1.22')
NUMPY_LT_1_22_1 = not minversion(np, '1.22.1')
NUMPY_LT_1_23 = not minversion(np, '1.23')
NUMPY_LT_1_24 = not minversion(np, '1.24dev0')
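
# Typical usage elsewhere in astropy (an illustrative sketch):
#
#     from astropy.utils.compat import NUMPY_LT_1_22
#
#     if not NUMPY_LT_1_22:
#         # Rely on behaviour only available in numpy >= 1.22,
#         # e.g., the keepdims argument to argmin/argmax.
#         ...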
|
484a53b378a55661f1384b2c4f941f8571814023d0bc5ed9d522db31e608c58b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.table import QTable, hstack, vstack, join
from astropy.utils.masked import Masked
from astropy.utils.compat.optional_deps import HAS_H5PY
from .test_masked import assert_masked_equal
FILE_FORMATS = ['ecsv', 'fits']
if HAS_H5PY:
FILE_FORMATS.append('h5')
class MaskedArrayTableSetup:
@classmethod
def setup_arrays(self):
self.a = np.array([3., 5., 0.])
self.mask_a = np.array([True, False, False])
@classmethod
def setup_class(self):
self.setup_arrays()
self.ma = Masked(self.a, mask=self.mask_a)
self.ma.info.format = '.1f'
self.t = QTable([self.ma], names=['ma'])
class MaskedQuantityTableSetup(MaskedArrayTableSetup):
@classmethod
def setup_arrays(self):
self.a = np.array([3., 5., 0.]) << u.m
self.mask_a = np.array([True, False, False])
class TestMaskedArrayTable(MaskedArrayTableSetup):
def test_table_initialization(self):
assert_array_equal(self.t['ma'].unmasked, self.a)
assert_array_equal(self.t['ma'].mask, self.mask_a)
assert repr(self.t).splitlines()[-3:] == [
' ———',
' 5.0',
' 0.0']
def test_info_basics(self):
assert self.t['ma'].info.name == 'ma'
assert 'serialize_method' in self.t['ma'].info.attr_names
t2 = self.t.copy()
t2['ma'].info.format = '.2f'
t2['ma'].info.serialize_method['fits'] = 'nonsense'
assert repr(t2).splitlines()[-3:] == [
' ———',
' 5.00',
' 0.00']
# Check that if we slice, things get copied over correctly.
t3 = t2[:2]
assert t3['ma'].info.name == 'ma'
assert t3['ma'].info.format == '.2f'
assert 'serialize_method' in t3['ma'].info.attr_names
assert t3['ma'].info.serialize_method['fits'] == 'nonsense'
@pytest.mark.parametrize('file_format', FILE_FORMATS)
def test_table_write(self, file_format, tmpdir):
name = str(tmpdir.join(f"a.{file_format}"))
kwargs = {}
if file_format == 'h5':
kwargs['path'] = 'trial'
kwargs['serialize_meta'] = True
self.t.write(name, **kwargs)
t2 = QTable.read(name)
assert isinstance(t2['ma'], self.ma.__class__)
assert np.all(t2['ma'] == self.ma)
assert np.all(t2['ma'].mask == self.mask_a)
if file_format == 'fits':
# Imperfect roundtrip through FITS native format description.
assert self.t['ma'].info.format in t2['ma'].info.format
else:
assert t2['ma'].info.format == self.t['ma'].info.format
@pytest.mark.parametrize('serialize_method', ['data_mask', 'null_value'])
def test_table_write_serialization(self, serialize_method, tmpdir):
name = str(tmpdir.join("test.ecsv"))
self.t.write(name, serialize_method=serialize_method)
with open(name) as fh:
lines = fh.readlines()
t2 = QTable.read(name)
assert isinstance(t2['ma'], self.ma.__class__)
if serialize_method == 'data_mask':
            # With data_mask, we have data and mask columns and should
            # exactly round-trip.
assert len(lines[-1].split()) == 2
assert_masked_equal(t2['ma'], self.ma)
else:
            # With null_value we have just a data column with null values
            # marked, so we lose information on the data under the mask.
assert len(lines[-1].split()) == 1
assert np.all(t2['ma'] == self.ma)
assert np.all(t2['ma'].mask == self.mask_a)
def test_non_existing_serialize_method(self, tmpdir):
name = str(tmpdir.join('bad.ecsv'))
with pytest.raises(ValueError, match='serialize method must be'):
self.t.write(name, serialize_method='bad_serialize_method')
class TestMaskedQuantityTable(TestMaskedArrayTable, MaskedQuantityTableSetup):
# Runs tests from TestMaskedArrayTable as well as some extra ones.
def test_table_operations_requiring_masking(self):
t1 = self.t
t2 = QTable({'ma2': Masked([1, 2] * u.m)})
t12 = hstack([t1, t2], join_type='outer')
assert np.all(t12['ma'].mask == [True, False, False])
        # 'ma2' is one row shorter, so hstack pads it with a single masked
        # element to match the length.
assert np.all(t12['ma2'].mask == [False, False, True])
t12 = hstack([t1, t2], join_type='inner')
assert np.all(t12['ma'].mask == [True, False])
assert np.all(t12['ma2'].mask == [False, False])
# Vstack tables with different column names. In this case we get masked
# values
t12 = vstack([t1, t2], join_type='outer')
# ma ma2
# m m
# --- ---
# —— ——
# 5.0 ——
# 0.0 ——
# —— 1.0
# —— 2.0
assert np.all(t12['ma'].mask == [True, False, False, True, True])
assert np.all(t12['ma2'].mask == [True, True, True, False, False])
def test_table_operations_requiring_masking_auto_promote(self):
MaskedQuantity = Masked(u.Quantity)
t1 = QTable({'ma1': [1, 2] * u.m})
t2 = QTable({'ma2': [3, 4, 5] * u.m})
t12 = hstack([t1, t2], join_type='outer')
assert isinstance(t12['ma1'], MaskedQuantity)
assert np.all(t12['ma1'].mask == [False, False, True])
assert np.all(t12['ma1'] == [1, 2, 0] * u.m)
assert not isinstance(t12['ma2'], MaskedQuantity)
assert isinstance(t12['ma2'], u.Quantity)
assert np.all(t12['ma2'] == [3, 4, 5] * u.m)
t12 = hstack([t1, t2], join_type='inner')
assert isinstance(t12['ma1'], u.Quantity)
assert not isinstance(t12['ma1'], MaskedQuantity)
assert isinstance(t12['ma2'], u.Quantity)
assert not isinstance(t12['ma2'], MaskedQuantity)
# Vstack tables with different column names. In this case we get masked
# values
t12 = vstack([t1, t2], join_type='outer')
assert np.all(t12['ma1'].mask == [False, False, True, True, True])
assert np.all(t12['ma2'].mask == [True, True, False, False, False])
t1['a'] = [1, 2]
t2['a'] = [1, 3, 4]
t12 = join(t1, t2, join_type='outer')
assert np.all(t12['ma1'].mask == [False, False, True, True])
assert np.all(t12['ma2'].mask == [False, True, False, False])
|
bf12a1c3f784033ff51608312be8b46df5527c0dc24760167bc3f4f5f6e9e6f7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test masked class initialization, methods, and operators.
Functions, including ufuncs, are tested in test_functions.py
"""
import operator
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import Longitude
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_22
def assert_masked_equal(a, b):
assert_array_equal(a.unmasked, b.unmasked)
assert_array_equal(a.mask, b.mask)
VARIOUS_ITEMS = [
(1, 1),
slice(None, 1),
(),
1]
class ArraySetup:
_data_cls = np.ndarray
@classmethod
def setup_class(self):
self.a = np.arange(6.).reshape(2, 3)
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.b = np.array([-3., -2., -1.])
self.mask_b = np.array([False, True, False])
self.c = np.array([[0.25], [0.5]])
self.mask_c = np.array([[False], [True]])
self.sdt = np.dtype([('a', 'f8'), ('b', 'f8')])
self.mask_sdt = np.dtype([('a', '?'), ('b', '?')])
self.sa = np.array([[(1., 2.), (3., 4.)],
[(11., 12.), (13., 14.)]], dtype=self.sdt)
self.mask_sa = np.array([[(True, True), (False, False)],
[(False, True), (True, False)]],
dtype=self.mask_sdt)
self.sb = np.array([(1., 2.), (-3., 4.)], dtype=self.sdt)
self.mask_sb = np.array([(True, False), (False, False)],
dtype=self.mask_sdt)
self.scdt = np.dtype([('sa', '2f8'), ('sb', 'i8', (2, 2))])
self.sc = np.array([([1., 2.], [[1, 2], [3, 4]]),
([-1., -2.], [[-1, -2], [-3, -4]])],
dtype=self.scdt)
self.mask_scdt = np.dtype([('sa', '2?'), ('sb', '?', (2, 2))])
self.mask_sc = np.array([([True, False], [[False, False],
[True, True]]),
([False, True], [[True, False],
[False, True]])],
dtype=self.mask_scdt)
class QuantitySetup(ArraySetup):
_data_cls = Quantity
@classmethod
def setup_class(self):
super().setup_class()
self.a = Quantity(self.a, u.m)
self.b = Quantity(self.b, u.cm)
self.c = Quantity(self.c, u.km)
self.sa = Quantity(self.sa, u.m, dtype=self.sdt)
self.sb = Quantity(self.sb, u.cm, dtype=self.sdt)
class LongitudeSetup(ArraySetup):
_data_cls = Longitude
@classmethod
def setup_class(self):
super().setup_class()
self.a = Longitude(self.a, u.deg)
self.b = Longitude(self.b, u.deg)
self.c = Longitude(self.c, u.deg)
# Note: Longitude does not work on structured arrays, so
# leaving it as regular array (which just reruns some tests).
class TestMaskedArrayInitialization(ArraySetup):
def test_simple(self):
ma = Masked(self.a, mask=self.mask_a)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.a))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.a)
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_structured(self):
ma = Masked(self.sa, mask=self.mask_sa)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.sa))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.sa)
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
def test_masked_ndarray_init():
# Note: as a straight ndarray subclass, MaskedNDArray passes on
# the arguments relevant for np.ndarray, not np.array.
a_in = np.arange(3, dtype=int)
m_in = np.array([True, False, False])
buff = a_in.tobytes()
# Check we're doing things correctly using regular ndarray.
a = np.ndarray(shape=(3,), dtype=int, buffer=buff)
assert_array_equal(a, a_in)
# Check with and without mask.
ma = MaskedNDArray((3,), dtype=int, mask=m_in, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, m_in)
ma = MaskedNDArray((3,), dtype=int, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, np.zeros(3, bool))
def test_cannot_initialize_with_masked():
with pytest.raises(ValueError, match='cannot handle np.ma.masked'):
Masked(np.ma.masked)
def test_cannot_just_use_anything_with_a_mask_attribute():
class my_array(np.ndarray):
mask = True
a = np.array([1., 2.]).view(my_array)
with pytest.raises(AttributeError, match='unmasked'):
Masked(a)
class TestMaskedClassCreation:
"""Try creating a MaskedList and subclasses.
By no means meant to be realistic, just to check that the basic
machinery allows it.
"""
@classmethod
def setup_class(self):
self._base_classes_orig = Masked._base_classes.copy()
self._masked_classes_orig = Masked._masked_classes.copy()
class MaskedList(Masked, list, base_cls=list, data_cls=list):
def __new__(cls, *args, mask=None, copy=False, **kwargs):
self = super().__new__(cls)
self._unmasked = self._data_cls(*args, **kwargs)
self.mask = mask
return self
# Need to have shape for basics to work.
@property
def shape(self):
return (len(self._unmasked),)
self.MaskedList = MaskedList
def teardown_class(self):
Masked._base_classes = self._base_classes_orig
Masked._masked_classes = self._masked_classes_orig
def test_setup(self):
assert issubclass(self.MaskedList, Masked)
assert issubclass(self.MaskedList, list)
assert Masked(list) is self.MaskedList
def test_masked_list(self):
ml = self.MaskedList(range(3), mask=[True, False, False])
assert ml.unmasked == [0, 1, 2]
assert_array_equal(ml.mask, np.array([True, False, False]))
ml01 = ml[:2]
assert ml01.unmasked == [0, 1]
assert_array_equal(ml01.mask, np.array([True, False]))
def test_from_list(self):
ml = Masked([1, 2, 3], mask=[True, False, False])
assert ml.unmasked == [1, 2, 3]
assert_array_equal(ml.mask, np.array([True, False, False]))
def test_masked_list_subclass(self):
class MyList(list):
pass
ml = MyList(range(3))
mml = Masked(ml, mask=[False, True, False])
assert isinstance(mml, Masked)
assert isinstance(mml, MyList)
assert isinstance(mml.unmasked, MyList)
assert mml.unmasked == [0, 1, 2]
assert_array_equal(mml.mask, np.array([False, True, False]))
assert Masked(MyList) is type(mml)
class TestMaskedNDArraySubclassCreation:
"""Test that masked subclasses can be created directly and indirectly."""
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.asanyarray(*args, **kwargs).view(cls)
self.MyArray = MyArray
self.a = np.array([1., 2.]).view(self.MyArray)
self.m = np.array([True, False], dtype=bool)
def teardown_method(self, method):
Masked._masked_classes.pop(self.MyArray, None)
def test_direct_creation(self):
assert self.MyArray not in Masked._masked_classes
mcls = Masked(self.MyArray)
assert issubclass(mcls, Masked)
assert issubclass(mcls, self.MyArray)
assert mcls.__name__ == 'MaskedMyArray'
assert mcls.__doc__.startswith('Masked version of MyArray')
mms = mcls(self.a, mask=self.m)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
mcls = Masked(self.MyArray)
mms = mcls(self.a)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, np.zeros(mms.shape, bool))
@pytest.mark.parametrize('masked_array', [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
mcls = Masked(self.MyArray)
ma = masked_array(np.asarray(self.a), mask=self.m)
mms = mcls(ma)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_indirect_creation(self):
assert self.MyArray not in Masked._masked_classes
mms = Masked(self.a, mask=self.m)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
assert self.MyArray in Masked._masked_classes
assert Masked(self.MyArray) is type(mms)
def test_can_initialize_with_masked_values(self):
mcls = Masked(self.MyArray)
mms = mcls(Masked(np.asarray(self.a), mask=self.m))
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_viewing(self):
mms = Masked(self.a, mask=self.m)
mms2 = mms.view()
assert type(mms2) is mms.__class__
assert_masked_equal(mms2, mms)
ma = mms.view(np.ndarray)
assert type(ma) is MaskedNDArray
assert_array_equal(ma.unmasked, self.a.view(np.ndarray))
assert_array_equal(ma.mask, self.m)
class TestMaskedQuantityInitialization(TestMaskedArrayInitialization, QuantitySetup):
def test_masked_quantity_class_init(self):
# TODO: class definitions should be more easily accessible.
mcls = Masked._masked_classes[self.a.__class__]
# This is not a very careful test.
mq = mcls([1., 2.], mask=[True, False], unit=u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1., 2.])
assert np.all(mq.value.mask == [True, False])
assert np.all(mq.mask == [True, False])
def test_masked_quantity_getting(self):
mcls = Masked._masked_classes[self.a.__class__]
MQ = Masked(Quantity)
assert MQ is mcls
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
MQ = Masked(Quantity)
mq = MQ([1., 2.], u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1., 2.])
assert np.all(mq.mask == [False, False])
@pytest.mark.parametrize('masked_array', [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
MQ = Masked(Quantity)
a = np.array([1., 2.])
m = np.array([True, False])
ma = masked_array(a, m)
mq = MQ(ma)
assert isinstance(mq, Masked)
assert isinstance(mq, Quantity)
assert_array_equal(mq.value.unmasked, a)
assert_array_equal(mq.mask, m)
class TestMaskSetting(ArraySetup):
def test_whole_mask_setting_simple(self):
ma = Masked(self.a)
assert ma.mask.shape == ma.shape
assert not ma.mask.any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask.all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array([[True] * 3, [False] * 3]))
ma.mask = self.mask_a
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_whole_mask_setting_structured(self):
ma = Masked(self.sa)
assert ma.mask.shape == ma.shape
assert not ma.mask['a'].any() and not ma.mask['b'].any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask['a'].all() and ma.mask['b'].all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array(
[[(True, True)] * 2, [(False, False)] * 2], dtype=self.mask_sdt))
ma.mask = self.mask_sa
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_part_mask_setting(self, item):
ma = Masked(self.a)
ma.mask[item] = True
expected = np.zeros(ma.shape, bool)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, bool))
# Mask propagation
mask = np.zeros(self.a.shape, bool)
ma = Masked(self.a, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
def test_part_mask_setting_structured(self, item):
ma = Masked(self.sa)
ma.mask[item] = True
expected = np.zeros(ma.shape, self.mask_sdt)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, self.mask_sdt))
# Mask propagation
mask = np.zeros(self.sa.shape, self.mask_sdt)
ma = Masked(self.sa, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
# Following are tests where we trust the initializer works.
class MaskedArraySetup(ArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
self.mc = Masked(self.c, mask=self.mask_c)
self.msa = Masked(self.sa, mask=self.mask_sa)
self.msb = Masked(self.sb, mask=self.mask_sb)
self.msc = Masked(self.sc, mask=self.mask_sc)
class TestViewing(MaskedArraySetup):
def test_viewing_as_new_type(self):
ma2 = self.ma.view(type(self.ma))
assert_masked_equal(ma2, self.ma)
ma3 = self.ma.view()
assert_masked_equal(ma3, self.ma)
def test_viewing_as_new_dtype(self):
# Not very meaningful, but possible...
ma2 = self.ma.view('c8')
assert_array_equal(ma2.unmasked, self.a.view('c8'))
assert_array_equal(ma2.mask, self.mask_a)
@pytest.mark.parametrize('new_dtype', ['2f4', 'f8,f8,f8'])
def test_viewing_as_new_dtype_not_implemented(self, new_dtype):
        # But cannot (yet) view in a way that would need to create a new mask,
# even though that view is possible for a regular array.
check = self.a.view(new_dtype)
with pytest.raises(NotImplementedError, match='different.*size'):
self.ma.view(check.dtype)
def test_viewing_as_something_impossible(self):
with pytest.raises(TypeError):
            # Use intp to ensure we have the same size as object,
            # otherwise we get a different error message.
Masked(np.array([1, 2], dtype=np.intp)).view(Masked)
class TestMaskedArrayCopyFilled(MaskedArraySetup):
def test_copy(self):
ma_copy = self.ma.copy()
assert type(ma_copy) is type(self.ma)
assert_array_equal(ma_copy.unmasked, self.ma.unmasked)
assert_array_equal(ma_copy.mask, self.ma.mask)
assert not np.may_share_memory(ma_copy.unmasked, self.ma.unmasked)
assert not np.may_share_memory(ma_copy.mask, self.ma.mask)
@pytest.mark.parametrize('fill_value', (0, 1))
def test_filled(self, fill_value):
fill_value = fill_value * getattr(self.a, 'unit', 1)
expected = self.a.copy()
expected[self.ma.mask] = fill_value
result = self.ma.filled(fill_value)
assert_array_equal(expected, result)
def test_filled_no_fill_value(self):
with pytest.raises(TypeError, match='missing 1 required'):
self.ma.filled()
@pytest.mark.parametrize('fill_value', [(0, 1), (-1, -1)])
def test_filled_structured(self, fill_value):
fill_value = np.array(fill_value, dtype=self.sdt)
if hasattr(self.sa, 'unit'):
fill_value = fill_value << self.sa.unit
expected = self.sa.copy()
expected['a'][self.msa.mask['a']] = fill_value['a']
expected['b'][self.msa.mask['b']] = fill_value['b']
result = self.msa.filled(fill_value)
assert_array_equal(expected, result)
def test_flat(self):
ma_copy = self.ma.copy()
ma_flat = ma_copy.flat
# Check that single item keeps class and mask
ma_flat1 = ma_flat[1]
assert ma_flat1.unmasked == self.a.flat[1]
assert ma_flat1.mask == self.mask_a.flat[1]
# As well as getting items via iteration.
assert all((ma.unmasked == a and ma.mask == m) for (ma, a, m)
in zip(self.ma.flat, self.a.flat, self.mask_a.flat))
# check that flat works like a view of the real array
ma_flat[1] = self.b[1]
assert ma_flat[1] == self.b[1]
assert ma_copy[0, 1] == self.b[1]
class TestMaskedQuantityCopyFilled(TestMaskedArrayCopyFilled, QuantitySetup):
pass
class TestMaskedLongitudeCopyFilled(TestMaskedArrayCopyFilled, LongitudeSetup):
pass
class TestMaskedArrayShaping(MaskedArraySetup):
def test_reshape(self):
ma_reshape = self.ma.reshape((6,))
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting(self):
ma_reshape = self.ma.copy()
ma_reshape.shape = 6,
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting_failure(self):
ma = self.ma.copy()
with pytest.raises(ValueError, match='cannot reshape'):
ma.shape = 5,
assert ma.shape == self.ma.shape
assert ma.mask.shape == self.ma.shape
# Here, mask can be reshaped but array cannot.
ma2 = Masked(np.broadcast_to([[1.], [2.]], self.a.shape),
mask=self.mask_a)
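        # 'ncompatible' deliberately matches both 'incompatible' and
        # 'Incompatible'.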
with pytest.raises(AttributeError, match='ncompatible shape'):
ma2.shape = 6,
assert ma2.shape == self.ma.shape
assert ma2.mask.shape == self.ma.shape
# Here, array can be reshaped but mask cannot.
ma3 = Masked(self.a.copy(), mask=np.broadcast_to([[True], [False]],
self.mask_a.shape))
with pytest.raises(AttributeError, match='ncompatible shape'):
ma3.shape = 6,
assert ma3.shape == self.ma.shape
assert ma3.mask.shape == self.ma.shape
def test_ravel(self):
ma_ravel = self.ma.ravel()
expected_data = self.a.ravel()
expected_mask = self.mask_a.ravel()
assert ma_ravel.shape == expected_data.shape
assert_array_equal(ma_ravel.unmasked, expected_data)
assert_array_equal(ma_ravel.mask, expected_mask)
def test_transpose(self):
ma_transpose = self.ma.transpose()
expected_data = self.a.transpose()
expected_mask = self.mask_a.transpose()
assert ma_transpose.shape == expected_data.shape
assert_array_equal(ma_transpose.unmasked, expected_data)
assert_array_equal(ma_transpose.mask, expected_mask)
def test_iter(self):
for ma, d, m in zip(self.ma, self.a, self.mask_a):
assert_array_equal(ma.unmasked, d)
assert_array_equal(ma.mask, m)
class MaskedItemTests(MaskedArraySetup):
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_getitem(self, item):
ma_part = self.ma[item]
expected_data = self.a[item]
expected_mask = self.mask_a[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
def test_getitem_structured(self, item):
ma_part = self.msa[item]
expected_data = self.sa[item]
expected_mask = self.mask_sa[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize('indices,axis', [
([0, 1], 1), ([0, 1], 0), ([0, 1], None), ([[0, 1], [2, 3]], None)])
def test_take(self, indices, axis):
ma_take = self.ma.take(indices, axis=axis)
expected_data = self.a.take(indices, axis=axis)
expected_mask = self.mask_a.take(indices, axis=axis)
assert_array_equal(ma_take.unmasked, expected_data)
assert_array_equal(ma_take.mask, expected_mask)
ma_take2 = np.take(self.ma, indices, axis=axis)
assert_masked_equal(ma_take2, ma_take)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
@pytest.mark.parametrize('mask', [None, True, False])
def test_setitem(self, item, mask):
base = self.ma.copy()
expected_data = self.a.copy()
expected_mask = self.mask_a.copy()
value = self.a[0, 0] if mask is None else Masked(self.a[0, 0], mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
@pytest.mark.parametrize('mask', [None, True, False])
def test_setitem_structured(self, item, mask):
base = self.msa.copy()
expected_data = self.sa.copy()
expected_mask = self.mask_sa.copy()
value = self.sa['b'] if item == 'a' else self.sa[0, 0]
if mask is not None:
value = Masked(value, mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_setitem_np_ma_masked(self, item):
base = self.ma.copy()
expected_mask = self.mask_a.copy()
base[item] = np.ma.masked
expected_mask[item] = True
assert_array_equal(base.unmasked, self.a)
assert_array_equal(base.mask, expected_mask)
class TestMaskedArrayItems(MaskedItemTests):
@classmethod
def setup_class(self):
super().setup_class()
self.d = np.array(['aa', 'bb'])
self.mask_d = np.array([True, False])
self.md = Masked(self.d, self.mask_d)
# Quantity, Longitude cannot hold strings.
def test_getitem_strings(self):
md = self.md.copy()
md0 = md[0]
assert md0.unmasked == self.d[0]
assert md0.mask
md_all = md[:]
assert_masked_equal(md_all, md)
def test_setitem_strings_np_ma_masked(self):
md = self.md.copy()
md[1] = np.ma.masked
assert_array_equal(md.unmasked, self.d)
assert_array_equal(md.mask, np.ones(2, bool))
class TestMaskedQuantityItems(MaskedItemTests, QuantitySetup):
pass
class TestMaskedLongitudeItems(MaskedItemTests, LongitudeSetup):
pass
class MaskedOperatorTests(MaskedArraySetup):
@pytest.mark.parametrize('op', (operator.add, operator.sub))
def test_add_subtract(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_equality(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_not_implemented(self):
with pytest.raises(TypeError):
self.ma > 'abc'
@pytest.mark.parametrize('different_names', [False, True])
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_structured_equality(self, op, different_names):
msb = self.msb
        if different_names:
            msb = msb.astype([(f'different_{name}', msb.dtype[name])
                              for name in msb.dtype.names])
        mapmb = op(self.msa, msb)
# Expected is a bit tricky here: only unmasked fields count
expected_data = np.ones(mapmb.shape, bool)
expected_mask = np.ones(mapmb.shape, bool)
for field in self.sdt.names:
fa, mfa = self.sa[field], self.mask_sa[field]
fb, mfb = self.sb[field], self.mask_sb[field]
mfequal = mfa | mfb
fequal = (fa == fb) | mfequal
expected_data &= fequal
expected_mask &= mfequal
if op is operator.ne:
expected_data = ~expected_data
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_matmul(self):
result = self.ma.T @ self.ma
assert_array_equal(result.unmasked, self.a.T @ self.a)
mask1 = np.any(self.mask_a, axis=0)
expected_mask = np.logical_or.outer(mask1, mask1)
assert_array_equal(result.mask, expected_mask)
result2 = self.ma.T @ self.a
assert_array_equal(result2.unmasked, self.a.T @ self.a)
expected_mask2 = np.logical_or.outer(mask1, np.zeros(3, bool))
assert_array_equal(result2.mask, expected_mask2)
result3 = self.a.T @ self.ma
assert_array_equal(result3.unmasked, self.a.T @ self.a)
expected_mask3 = np.logical_or.outer(np.zeros(3, bool), mask1)
assert_array_equal(result3.mask, expected_mask3)
def test_matvec(self):
result = self.ma @ self.mb
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.a @ self.b)
# Just using the masked vector still has all elements masked.
result2 = self.a @ self.mb
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.a @ self.b)
new_ma = self.ma.copy()
new_ma.mask[0, 0] = False
result3 = new_ma @ self.b
assert_array_equal(result3.unmasked, self.a @ self.b)
assert_array_equal(result3.mask, new_ma.mask.any(-1))
def test_vecmat(self):
result = self.mb @ self.ma.T
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.b @ self.a.T)
result2 = self.b @ self.ma.T
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.b @ self.a.T)
new_ma = self.ma.T.copy()
new_ma.mask[0, 0] = False
result3 = self.b @ new_ma
assert_array_equal(result3.unmasked, self.b @ self.a.T)
assert_array_equal(result3.mask, new_ma.mask.any(0))
def test_vecvec(self):
result = self.mb @ self.mb
assert result.shape == ()
assert result.mask
assert result.unmasked == self.b @ self.b
mb_no_mask = Masked(self.b, False)
result2 = mb_no_mask @ mb_no_mask
assert not result2.mask
class TestMaskedArrayOperators(MaskedOperatorTests):
# Some further tests that use strings, which are not useful for Quantity.
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_equality_strings(self, op):
m1 = Masked(np.array(['a', 'b', 'c']), mask=[True, False, False])
m2 = Masked(np.array(['a', 'b', 'd']), mask=[False, False, False])
result = op(m1, m2)
assert_array_equal(result.unmasked, op(m1.unmasked, m2.unmasked))
assert_array_equal(result.mask, m1.mask | m2.mask)
result2 = op(m1, m2.unmasked)
assert_masked_equal(result2, result)
def test_not_implemented(self):
with pytest.raises(TypeError):
Masked(['a', 'b']) > object()
class TestMaskedQuantityOperators(MaskedOperatorTests, QuantitySetup):
pass
class TestMaskedLongitudeOperators(MaskedOperatorTests, LongitudeSetup):
pass
class TestMaskedArrayMethods(MaskedArraySetup):
def test_round(self):
# Goes via ufunc, hence easy.
mrc = self.mc.round()
expected = Masked(self.c.round(), self.mask_c)
assert_masked_equal(mrc, expected)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_sum(self, axis):
ma_sum = self.ma.sum(axis)
expected_data = self.a.sum(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_sum_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_sum = self.ma.sum(axis, where=where_final)
expected_data = self.ma.unmasked.sum(axis, where=where_final)
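        # An output element should be masked only if nothing along the axis
        # was both unmasked and selected by ``where``.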
        expected_mask = np.logical_or.reduce(
            self.ma.mask, axis=axis, where=where_final
        ) | (~where_final).all(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_cumsum(self, axis):
ma_sum = self.ma.cumsum(axis)
expected_data = self.a.cumsum(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
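        # Once an element is masked, all cumulative results following it
        # are masked too.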
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_mean(self, axis):
ma_mean = self.ma.mean(axis)
filled = self.a.copy()
filled[self.mask_a] = 0.
count = 1 - self.ma.mask.astype(int)
expected_data = filled.sum(axis) / count.sum(axis)
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
def test_mean_int16(self):
ma = self.ma.astype('i2')
ma_mean = ma.mean()
assert ma_mean.dtype == 'f8'
expected = ma.astype('f8').mean()
assert_masked_equal(ma_mean, expected)
def test_mean_float16(self):
ma = self.ma.astype('f2')
ma_mean = ma.mean()
assert ma_mean.dtype == 'f2'
expected = self.ma.mean().astype('f2')
assert_masked_equal(ma_mean, expected)
def test_mean_inplace(self):
expected = self.ma.mean(1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.mean(1, out=out)
assert result is out
assert_masked_equal(out, expected)
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_mean_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_mean = self.ma.mean(axis, where=where)
expected_data = self.ma.unmasked.mean(axis, where=where_final)
        expected_mask = np.logical_or.reduce(
            self.ma.mask, axis=axis, where=where_final
        ) | (~where_final).all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_var(self, axis):
ma_var = self.ma.var(axis)
filled = (self.a - self.ma.mean(axis, keepdims=True))**2
filled[self.mask_a] = 0.
count = (1 - self.ma.mask.astype(int)).sum(axis)
expected_data = filled.sum(axis) / count
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
ma_var1 = self.ma.var(axis, ddof=1)
expected_data1 = filled.sum(axis) / (count - 1)
expected_mask1 = self.ma.mask.all(axis) | (count <= 1)
assert_array_equal(ma_var1.unmasked, expected_data1)
assert_array_equal(ma_var1.mask, expected_mask1)
ma_var5 = self.ma.var(axis, ddof=5)
assert np.all(~np.isfinite(ma_var5.unmasked))
assert ma_var5.mask.all()
def test_var_int16(self):
ma = self.ma.astype('i2')
ma_var = ma.var()
assert ma_var.dtype == 'f8'
expected = ma.astype('f8').var()
assert_masked_equal(ma_var, expected)
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_var_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_var = self.ma.var(axis, where=where)
expected_data = self.ma.unmasked.var(axis, where=where_final)
        expected_mask = np.logical_or.reduce(
            self.ma.mask, axis=axis, where=where_final
        ) | (~where_final).all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
def test_std(self):
ma_std = self.ma.std(1, ddof=1)
ma_var1 = self.ma.var(1, ddof=1)
expected = np.sqrt(ma_var1)
assert_masked_equal(ma_std, expected)
def test_std_inplace(self):
expected = self.ma.std(1, ddof=1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.std(1, ddof=1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_std_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_std = self.ma.std(axis, where=where)
expected_data = self.ma.unmasked.std(axis, where=where_final)
        expected_mask = np.logical_or.reduce(
            self.ma.mask, axis=axis, where=where_final
        ) | (~where_final).all(axis)
assert_array_equal(ma_std.unmasked, expected_data)
assert_array_equal(ma_std.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_min(self, axis):
ma_min = self.ma.min(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.min(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert not np.any(ma_min.mask)
def test_min_with_masked_nan(self):
ma = Masked([3., np.nan, 2.], mask=[False, True, False])
ma_min = ma.min()
assert_array_equal(ma_min.unmasked, np.array(2.))
assert not ma_min.mask
@pytest.mark.parametrize('axis', (0, 1, None))
def test_min_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_min = self.ma.min(axis, where=where_final, initial=np.inf)
expected_data = self.ma.unmasked.min(axis, where=where_final, initial=np.inf)
        expected_mask = np.logical_or.reduce(
            self.ma.mask, axis=axis, where=where_final
        ) | (~where_final).all(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert_array_equal(ma_min.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_max(self, axis):
ma_max = self.ma.max(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.max(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert not np.any(ma_max.mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_max_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_max = self.ma.max(axis, where=where_final, initial=-np.inf)
expected_data = self.ma.unmasked.max(axis, where=where_final, initial=-np.inf)
        expected_mask = np.logical_or.reduce(
            self.ma.mask, axis=axis, where=where_final
        ) | (~where_final).all(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert_array_equal(ma_max.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argmin(self, axis):
ma_argmin = self.ma.argmin(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.argmin(axis)
assert_array_equal(ma_argmin, expected_data)
def test_argmin_only_one_unmasked_element(self):
# Regression test for example from @taldcroft at
# https://github.com/astropy/astropy/pull/11127#discussion_r600864559
ma = Masked(data=[1, 2], mask=[True, False])
assert ma.argmin() == 1
if not NUMPY_LT_1_22:
def test_argmin_keepdims(self):
ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])
assert_array_equal(ma.argmin(axis=0, keepdims=True),
np.array([[1, 0]]))
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argmax(self, axis):
ma_argmax = self.ma.argmax(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.argmax(axis)
assert_array_equal(ma_argmax, expected_data)
if not NUMPY_LT_1_22:
def test_argmax_keepdims(self):
ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])
assert_array_equal(ma.argmax(axis=1, keepdims=True),
np.array([[1], [1]]))
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argsort(self, axis):
ma_argsort = self.ma.argsort(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max() * 1.1
expected_data = filled.argsort(axis)
assert_array_equal(ma_argsort, expected_data)
@pytest.mark.parametrize('order', [None, 'a', ('a', 'b'), ('b', 'a')])
@pytest.mark.parametrize('axis', [0, 1])
def test_structured_argsort(self, axis, order):
ma_argsort = self.msa.argsort(axis, order=order)
filled = self.msa.filled(fill_value=np.array((np.inf, np.inf),
dtype=self.sdt))
expected_data = filled.argsort(axis, order=order)
assert_array_equal(ma_argsort, expected_data)
def test_argsort_error(self):
with pytest.raises(ValueError, match='when the array has no fields'):
self.ma.argsort(axis=0, order='a')
@pytest.mark.parametrize('axis', (0, 1))
def test_sort(self, axis):
ma_sort = self.ma.copy()
ma_sort.sort(axis)
indices = self.ma.argsort(axis)
expected_data = np.take_along_axis(self.ma.unmasked, indices, axis)
expected_mask = np.take_along_axis(self.ma.mask, indices, axis)
assert_array_equal(ma_sort.unmasked, expected_data)
assert_array_equal(ma_sort.mask, expected_mask)
@pytest.mark.parametrize('kth', [1, 3])
def test_argpartition(self, kth):
ma = self.ma.ravel()
ma_argpartition = ma.argpartition(kth)
partitioned = ma[ma_argpartition]
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
@pytest.mark.parametrize('kth', [1, 3])
def test_partition(self, kth):
partitioned = self.ma.flatten()
partitioned.partition(kth)
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
def test_all_explicit(self):
a1 = np.array([[1., 2.],
[3., 4.]])
a2 = np.array([[1., 0.],
[3., 4.]])
if self._data_cls is not np.ndarray:
a1 = self._data_cls(a1, self.a.unit)
a2 = self._data_cls(a2, self.a.unit)
ma1 = Masked(a1, mask=[[False, False],
[True, True]])
ma2 = Masked(a2, mask=[[False, True],
[False, True]])
ma1_eq_ma2 = ma1 == ma2
assert_array_equal(ma1_eq_ma2.unmasked, np.array([[True, False],
[True, True]]))
assert_array_equal(ma1_eq_ma2.mask, np.array([[False, True],
[True, True]]))
assert ma1_eq_ma2.all()
assert not (ma1 != ma2).all()
ma_eq1 = ma1_eq_ma2.all(1)
assert_array_equal(ma_eq1.mask, np.array([False, True]))
assert bool(ma_eq1[0]) is True
assert bool(ma_eq1[1]) is False
ma_eq0 = ma1_eq_ma2.all(0)
assert_array_equal(ma_eq0.mask, np.array([False, True]))
        assert bool(ma_eq0[0]) is True
        assert bool(ma_eq0[1]) is False
@pytest.mark.parametrize('method', ['any', 'all'])
@pytest.mark.parametrize('array,axis', [
('a', 0), ('a', 1), ('a', None),
('b', None),
('c', 0), ('c', 1), ('c', None)])
def test_all_and_any(self, array, axis, method):
ma = getattr(self, 'm'+array)
ma_eq = ma == ma
ma_all_or_any = getattr(ma_eq, method)(axis=axis)
filled = ma_eq.unmasked.copy()
filled[ma_eq.mask] = method == 'all'
a_all_or_any = getattr(filled, method)(axis=axis)
all_masked = ma.mask.all(axis)
assert_array_equal(ma_all_or_any.mask, all_masked)
assert_array_equal(ma_all_or_any.unmasked, a_all_or_any)
# interpretation as bool
as_bool = [bool(a) for a in ma_all_or_any.ravel()]
expected = [bool(a) for a in (a_all_or_any & ~all_masked).ravel()]
assert as_bool == expected
def test_any_inplace(self):
ma_eq = self.ma == self.ma
expected = ma_eq.any(1)
out = Masked(np.zeros_like(expected.unmasked))
result = ma_eq.any(1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
@pytest.mark.parametrize('method', ('all', 'any'))
@pytest.mark.parametrize('axis', (0, 1, None))
def test_all_and_any_where(self, method, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_eq = self.ma == self.ma
ma_any = getattr(ma_eq, method)(axis, where=where)
expected_data = getattr(ma_eq.unmasked, method)(axis, where=where_final)
        expected_mask = np.logical_or.reduce(
            self.ma.mask, axis=axis, where=where_final
        ) | (~where_final).all(axis)
assert_array_equal(ma_any.unmasked, expected_data)
assert_array_equal(ma_any.mask, expected_mask)
@pytest.mark.parametrize('offset', (0, 1))
def test_diagonal(self, offset):
mda = self.ma.diagonal(offset=offset)
expected = Masked(self.a.diagonal(offset=offset),
self.mask_a.diagonal(offset=offset))
assert_masked_equal(mda, expected)
@pytest.mark.parametrize('offset', (0, 1))
def test_trace(self, offset):
mta = self.ma.trace(offset=offset)
expected = Masked(self.a.trace(offset=offset),
self.mask_a.trace(offset=offset, dtype=bool))
assert_masked_equal(mta, expected)
def test_clip(self):
maclip = self.ma.clip(self.b, self.c)
expected = Masked(self.a.clip(self.b, self.c), self.mask_a)
assert_masked_equal(maclip, expected)
def test_clip_masked_min_max(self):
maclip = self.ma.clip(self.mb, self.mc)
# Need to be careful with min, max because of Longitude, which wraps.
dmax = np.maximum(np.maximum(self.a, self.b), self.c).max()
dmin = np.minimum(np.minimum(self.a, self.b), self.c).min()
expected = Masked(self.a.clip(self.mb.filled(dmin),
self.mc.filled(dmax)),
mask=self.mask_a)
assert_masked_equal(maclip, expected)
class TestMaskedQuantityMethods(TestMaskedArrayMethods, QuantitySetup):
pass
class TestMaskedLongitudeMethods(TestMaskedArrayMethods, LongitudeSetup):
pass
class TestMaskedArrayProductMethods(MaskedArraySetup):
# These cannot work on Quantity, so done separately
@pytest.mark.parametrize('axis', (0, 1, None))
def test_prod(self, axis):
ma_sum = self.ma.prod(axis)
expected_data = self.a.prod(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_cumprod(self, axis):
ma_sum = self.ma.cumprod(axis)
expected_data = self.a.cumprod(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
def test_masked_str_explicit():
sa = np.array([(1., 2.), (3., 4.)], dtype='f8,f8')
msa = Masked(sa, [(False, True), (False, False)])
assert str(msa) == "[(1., ——) (3., 4.)]"
assert str(msa[0]) == "(1., ——)"
assert str(msa[1]) == "(3., 4.)"
with np.printoptions(precision=3, floatmode='fixed'):
assert str(msa) == "[(1.000, ———) (3.000, 4.000)]"
def test_masked_repr_explicit():
# Use explicit endianness to ensure tests pass on all architectures
sa = np.array([(1., 2.), (3., 4.)], dtype='>f8,>f8')
msa = Masked(sa, [(False, True), (False, False)])
assert repr(msa) == ("MaskedNDArray([(1., ——), (3., 4.)], "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
assert repr(msa[0]) == ("MaskedNDArray((1., ——), "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
assert repr(msa[1]) == ("MaskedNDArray((3., 4.), "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
def test_masked_repr_summary():
ma = Masked(np.arange(15.), mask=[True]+[False]*14)
with np.printoptions(threshold=2):
assert repr(ma) == (
"MaskedNDArray([———, 1., 2., ..., 12., 13., 14.])")
def test_masked_repr_nodata():
assert repr(Masked([])) == "MaskedNDArray([], dtype=float64)"
class TestMaskedArrayRepr(MaskedArraySetup):
def test_array_str(self):
        # Very blunt check that they work at all.
str(self.ma)
str(self.mb)
str(self.mc)
str(self.msa)
str(self.msb)
str(self.msc)
def test_scalar_str(self):
assert self.mb[0].shape == ()
str(self.mb[0])
assert self.msb[0].shape == ()
str(self.msb[0])
assert self.msc[0].shape == ()
str(self.msc[0])
def test_array_repr(self):
repr(self.ma)
repr(self.mb)
repr(self.mc)
repr(self.msa)
repr(self.msb)
repr(self.msc)
def test_scalar_repr(self):
repr(self.mb[0])
repr(self.msb[0])
repr(self.msc[0])
class TestMaskedQuantityRepr(TestMaskedArrayRepr, QuantitySetup):
pass
class TestMaskedRecarray(MaskedArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ra = self.sa.view(np.recarray)
self.mra = Masked(self.ra, mask=self.mask_sa)
def test_recarray_setup(self):
assert isinstance(self.mra, Masked)
assert isinstance(self.mra, np.recarray)
assert np.all(self.mra.unmasked == self.ra)
assert np.all(self.mra.mask == self.mask_sa)
assert_array_equal(self.mra.view(np.ndarray), self.sa)
assert isinstance(self.mra.a, Masked)
assert_array_equal(self.mra.a.unmasked, self.sa['a'])
assert_array_equal(self.mra.a.mask, self.mask_sa['a'])
def test_recarray_setting(self):
mra = self.mra.copy()
mra.a = self.msa['b']
assert_array_equal(mra.a.unmasked, self.msa['b'].unmasked)
assert_array_equal(mra.a.mask, self.msa['b'].mask)
@pytest.mark.parametrize('attr', [0, 'a'])
def test_recarray_field_getting(self, attr):
mra_a = self.mra.field(attr)
assert isinstance(mra_a, Masked)
assert_array_equal(mra_a.unmasked, self.sa['a'])
assert_array_equal(mra_a.mask, self.mask_sa['a'])
@pytest.mark.parametrize('attr', [0, 'a'])
def test_recarray_field_setting(self, attr):
mra = self.mra.copy()
mra.field(attr, self.msa['b'])
assert_array_equal(mra.a.unmasked, self.msa['b'].unmasked)
assert_array_equal(mra.a.mask, self.msa['b'].mask)
class TestMaskedArrayInteractionWithNumpyMA(MaskedArraySetup):
def test_masked_array_from_masked(self):
"""Check that we can initialize a MaskedArray properly."""
np_ma = np.ma.MaskedArray(self.ma)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
def test_view_as_masked_array(self):
"""Test that we can be viewed as a MaskedArray."""
np_ma = self.ma.view(np.ma.MaskedArray)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
class TestMaskedQuantityInteractionWithNumpyMA(
TestMaskedArrayInteractionWithNumpyMA, QuantitySetup):
pass
|
7305b7c093c44b443e70b4decfdca72f68b97ab595e3c6d7bacfbd8ffdd68385 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord, representation as r
from astropy.time import Time
from astropy.utils.masked import Masked
class TestRepresentations:
def setup_class(self):
self.x = np.array([3., 5., 0.]) << u.m
self.y = np.array([4., 12., 1.]) << u.m
self.z = np.array([0., 0., 1.]) << u.m
self.c = r.CartesianRepresentation(self.x, self.y, self.z)
self.mask = np.array([False, False, True])
self.mx = Masked(self.x, self.mask)
self.my = Masked(self.y, self.mask)
self.mz = Masked(self.z, self.mask)
self.mc = r.CartesianRepresentation(self.mx, self.my, self.mz)
def test_initialization(self):
check = self.mc.z == self.mz
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
assert_array_equal(self.mc.x, self.mx)
assert_array_equal(self.mc.y, self.my)
assert_array_equal(self.mc.z, self.mz)
def test_norm(self):
# Need stacking and erfa override.
norm = self.mc.norm()
assert_array_equal(norm.unmasked, self.c.norm())
assert_array_equal(norm.mask, self.mask)
def test_transformation(self):
msr = self.mc.represent_as(r.SphericalRepresentation)
sr = self.c.represent_as(r.SphericalRepresentation)
for comp in msr.components:
mc = getattr(msr, comp)
c = getattr(sr, comp)
assert_array_equal(mc.unmasked, c)
assert_array_equal(mc.mask, self.mask)
# Transformation back. This also tests erfa.ufunc.s2p, which
# is special in having a core dimension only in the output.
cr2 = sr.represent_as(r.CartesianRepresentation)
mcr2 = msr.represent_as(r.CartesianRepresentation)
for comp in mcr2.components:
mc = getattr(mcr2, comp)
c = getattr(cr2, comp)
assert_array_equal(mc.unmasked, c)
assert_array_equal(mc.mask, self.mask)
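# A rough sketch of the mask propagation covered above (values illustrative,
# using the imports at the top of this file):
#
#     >>> x = Masked([1., 0.] * u.m, mask=[False, True])
#     >>> c = r.CartesianRepresentation(x, x, x)
#     >>> c.represent_as(r.SphericalRepresentation).distance.mask.tolist()
#     [False, True]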
class TestSkyCoord:
def setup_class(self):
self.ra = np.array([3., 5., 0.]) << u.hourangle
self.dec = np.array([4., 12., 1.]) << u.deg
self.sc = SkyCoord(self.ra, self.dec)
self.mask = np.array([False, False, True])
self.mra = Masked(self.ra, self.mask)
self.mdec = Masked(self.dec, self.mask)
self.msc = SkyCoord(self.mra, self.mdec)
def test_initialization(self):
check = self.msc.dec == self.mdec
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
assert_array_equal(self.msc.data.lon, self.mra)
assert_array_equal(self.msc.data.lat, self.mdec)
def test_transformation(self):
gcrs = self.sc.gcrs
mgcrs = self.msc.gcrs
assert_array_equal(mgcrs.data.lon.mask, self.msc.data.lon.mask)
assert_array_equal(mgcrs.data.lon.unmasked, gcrs.data.lon)
assert_array_equal(mgcrs.data.lat.unmasked, gcrs.data.lat)
class TestTime:
def setup_class(self):
self.s = np.array(['2010-11-12T13:14:15.160',
'2010-11-12T13:14:15.161',
'2011-12-13T14:15:16.170'])
self.t = Time(self.s)
# Time formats will currently strip any ndarray subtypes, so we cannot
# initialize a Time with a Masked version of self.s yet. Instead, we
        # work around it, for now only testing that masks are preserved by
# transformations.
self.mask = np.array([False, False, True])
self.mt = self.t._apply(Masked, self.mask)
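        # (Time._apply is a private helper that applies a function to the
        # internal jd1/jd2 arrays, here wrapping them in Masked; an
        # implementation detail, not public API.)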
def test_initialization(self):
assert_array_equal(self.mt.jd1.mask, self.mask)
assert_array_equal(self.mt.jd2.mask, self.mask)
assert_array_equal(self.mt.jd1.unmasked, self.t.jd1)
assert_array_equal(self.mt.jd2.unmasked, self.t.jd2)
@pytest.mark.parametrize('format_', ['jd', 'cxcsec', 'jyear'])
def test_different_formats(self, format_):
# Formats do not yet work with everything; e.g., isot is not supported
# since the Masked class does not yet support structured arrays.
tfmt = getattr(self.t, format_)
mtfmt = getattr(self.mt, format_)
check = mtfmt == tfmt
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
@pytest.mark.parametrize('scale', ['tai', 'tcb', 'ut1'])
def test_transformation(self, scale):
tscl = getattr(self.t, scale)
mtscl = getattr(self.mt, scale)
assert_array_equal(mtscl.jd1.mask, self.mask)
assert_array_equal(mtscl.jd2.mask, self.mask)
assert_array_equal(mtscl.jd1.unmasked, tscl.jd1)
assert_array_equal(mtscl.jd2.unmasked, tscl.jd2)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test all functions covered by __array_function__.
Here, run through all functions, with simple tests just to check the helpers.
More complicated tests of functionality, including with subclasses, are done
in test_functions.
TODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)
- np.linalg
- np.fft (is there any point?)
- np.lib.nanfunctions
"""
import inspect
import itertools
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_20, NUMPY_LT_1_23
from astropy.units.tests.test_quantity_non_ufuncs import (
get_wrapped_functions)
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.masked.function_helpers import (
MASKED_SAFE_FUNCTIONS,
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
IGNORED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
from .test_masked import assert_masked_equal, MaskedArraySetup
all_wrapped_functions = get_wrapped_functions(np)
all_wrapped = set(all_wrapped_functions.values())
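# For orientation: numpy routes ``np.func(masked_array, ...)`` through
# ``MaskedNDArray.__array_function__``, which consults the helper dicts
# imported above. A rough sketch of the resulting behaviour (values
# illustrative):
#
#     >>> np.concatenate([Masked(np.ones(2), mask=[True, False])] * 2).mask
#     array([ True, False,  True, False])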
class BasicTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = Masked(func(self.a, *args, **kwargs),
mask=func(self.mask_a, *args, **kwargs))
assert_masked_equal(out, expected)
def check2(self, func, *args, **kwargs):
out = func(self.ma, self.mb, *args, **kwargs)
expected = Masked(func(self.a, self.b, *args, **kwargs),
mask=func(self.mask_a, self.mask_b, *args, **kwargs))
if isinstance(out, (tuple, list)):
for o, x in zip(out, expected):
assert_masked_equal(o, x)
else:
assert_masked_equal(out, expected)
class NoMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
class InvariantMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, self.mask_a)
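# Together, these helpers cover the common patterns: BasicTestSetup applies
# the function to data and mask alike, NoMaskTestSetup expects a plain
# unmasked result, and InvariantMaskTestSetup expects changed data but an
# unchanged mask.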
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.ma) == (2, 3)
def test_size(self):
assert np.size(self.ma) == 6
def test_ndim(self):
assert np.ndim(self.ma) == 2
class TestShapeManipulation(BasicTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (6, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
self.check(np.atleast_1d)
o, so = np.atleast_1d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)
def test_atleast_2d(self):
self.check(np.atleast_2d)
o, so = np.atleast_2d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)
def test_atleast_3d(self):
self.check(np.atleast_3d)
o, so = np.atleast_3d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.mc)
assert o.shape == o.mask.shape == (2,)
assert_array_equal(o.unmasked, self.c.squeeze())
assert_array_equal(o.mask, self.mask_c.squeeze())
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
self.check(np.broadcast_to, (3, 2, 3))
self.check(np.broadcast_to, (3, 2, 3), subok=False)
def test_broadcast_arrays(self):
self.check2(np.broadcast_arrays)
self.check2(np.broadcast_arrays, subok=False)
class TestArgFunctions(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.)
@pytest.mark.filterwarnings('ignore:Calling nonzero on 0d arrays is deprecated')
def test_nonzero_0d(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], np.ones(()).nonzero()[0])
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], np.zeros(()).nonzero()[0])
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.)
class TestAlongAxis(MaskedArraySetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
out = np.take_along_axis(self.ma, indices, axis=0)
expected = np.take_along_axis(self.a, indices, axis=0)
expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_put_along_axis(self):
ma = self.ma.copy()
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
np.put_along_axis(ma, indices, axis=0, values=-1)
expected = self.a.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, self.mask_a)
np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)
assert_array_equal(ma.unmasked, expected)
expected_mask = self.mask_a.copy()
np.put_along_axis(expected_mask, indices, axis=0, values=True)
assert_array_equal(ma.mask, expected_mask)
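        # Note that np.ma.masked acts as a sentinel here: putting it only
        # sets the corresponding mask entries to True and leaves the
        # underlying data untouched.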
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.ma)
expected = np.apply_along_axis(np.square, axis, self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.parametrize('axes', [(1,), 0, (0, -1)])
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.mean(np.square(x), axis)
out = np.apply_over_axes(function, self.ma, axes)
expected = self.ma
for axis in (axes if isinstance(axes, tuple) else (axes,)):
expected = (expected**2).mean(axis, keepdims=True)
assert_array_equal(out.unmasked, expected.unmasked)
assert_array_equal(out.mask, expected.mask)
def test_apply_over_axes_no_reduction(self):
out = np.apply_over_axes(np.cumsum, self.ma, 0)
expected = self.ma.cumsum(axis=0)
assert_masked_equal(out, expected)
def test_apply_over_axes_wrong_size(self):
with pytest.raises(ValueError, match='not.*correct shape'):
np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)
class TestIndicesFrom(NoMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.arange(9).reshape(3, 3)
self.mask_a = np.eye(3, dtype=bool)
self.ma = Masked(self.a, self.mask_a)
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.array([1+2j, 3+4j])
self.mask_a = np.array([True, False])
self.ma = Masked(self.a, mask=self.mask_a)
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.ma)
assert_array_equal(farray, self.ma)
class TestArrayCreation(MaskedArraySetup):
def test_empty_like(self):
o = np.empty_like(self.ma)
assert o.shape == (2, 3)
assert isinstance(o, Masked)
assert isinstance(o, np.ndarray)
o2 = np.empty_like(prototype=self.ma)
assert o2.shape == (2, 3)
assert isinstance(o2, Masked)
assert isinstance(o2, np.ndarray)
o3 = np.empty_like(self.ma, subok=False)
assert type(o3) is MaskedNDArray
def test_zeros_like(self):
o = np.zeros_like(self.ma)
assert_array_equal(o.unmasked, np.zeros_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.zeros_like(a=self.ma)
assert_array_equal(o2.unmasked, np.zeros_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
def test_ones_like(self):
o = np.ones_like(self.ma)
assert_array_equal(o.unmasked, np.ones_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.ones_like(a=self.ma)
assert_array_equal(o2.unmasked, np.ones_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
@pytest.mark.parametrize('value', [0.5, Masked(0.5, mask=True), np.ma.masked])
def test_full_like(self, value):
o = np.full_like(self.ma, value)
if value is np.ma.masked:
expected = Masked(o.unmasked, True)
else:
expected = Masked(np.empty_like(self.a))
expected[...] = value
assert_array_equal(o.unmasked, expected.unmasked)
assert_array_equal(o.mask, expected.mask)
class TestAccessingParts(BasicTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
ma = self.ma.ravel()
o = np.diag(ma)
assert_array_equal(o.unmasked, np.diag(self.a.ravel()))
assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False], self.ma, axis=0)
expected = np.compress([True, False], self.a, axis=0)
expected_mask = np.compress([True, False], self.mask_a, axis=0)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_extract(self):
o = np.extract([True, False, True], self.ma)
expected = np.extract([True, False, True], self.a)
expected_mask = np.extract([True, False, True], self.mask_a)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(MaskedArraySetup):
def test_put(self):
ma = self.ma.copy()
v = Masked([50, 150], [False, True])
np.put(ma, [0, 2], v)
expected = self.a.copy()
np.put(expected, [0, 2], [50, 150])
expected_mask = self.mask_a.copy()
np.put(expected_mask, [0, 2], [False, True])
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
# Indices cannot be masked.
np.put(ma, Masked([0, 2]), v)
with pytest.raises(TypeError):
# Array to put masked values in must be masked.
np.put(self.a.copy(), [0, 2], v)
def test_putmask(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(np.arange(100, 650, 100),
mask=[False, True, True, True, False, False])
np.putmask(ma, mask, values)
expected = self.a.flatten()
np.putmask(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.putmask(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.putmask(self.a.flatten(), mask, values)
def test_place(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked([100, 200], mask=[False, True])
np.place(ma, mask, values)
expected = self.a.flatten()
np.place(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.place(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.place(self.a.flatten(), mask, values)
def test_copyto(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(np.arange(100, 650, 100),
mask=[False, True, True, True, False, False])
np.copyto(ma, values, where=mask)
expected = self.a.flatten()
np.copyto(expected, values.unmasked, where=mask)
expected_mask = self.mask_a.flatten()
np.copyto(expected_mask, values.mask, where=mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.copyto(self.a.flatten(), values, where=mask)
@pytest.mark.parametrize('value', [0.25, np.ma.masked])
def test_fill_diagonal(self, value):
ma = self.ma[:2, :2].copy()
np.fill_diagonal(ma, value)
expected = ma.copy()
expected[np.diag_indices_from(expected)] = value
assert_array_equal(ma.unmasked, expected.unmasked)
assert_array_equal(ma.mask, expected.mask)
class TestRepeat(BasicTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(MaskedArraySetup):
# More tests at TestMaskedArrayConcatenation in test_functions.
def check(self, func, *args, **kwargs):
ma_list = kwargs.pop('ma_list', [self.ma, self.ma])
a_list = [Masked(ma).unmasked for ma in ma_list]
m_list = [Masked(ma).mask for ma in ma_list]
o = func(ma_list, *args, **kwargs)
expected = func(a_list, *args, **kwargs)
expected_mask = func(m_list, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, ma_list=[self.a, self.ma])
if not NUMPY_LT_1_20:
# Check that we can accept a dtype argument (introduced in numpy 1.20)
self.check(np.concatenate, dtype='f4')
out = Masked(np.empty((4, 3)))
result = np.concatenate([self.ma, self.ma], out=out)
assert out is result
expected = np.concatenate([self.a, self.a])
expected_mask = np.concatenate([self.mask_a, self.mask_a])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))
def test_stack(self):
self.check(np.stack)
def test_column_stack(self):
self.check(np.column_stack)
def test_hstack(self):
self.check(np.hstack)
def test_vstack(self):
self.check(np.vstack)
def test_dstack(self):
self.check(np.dstack)
def test_block(self):
self.check(np.block)
out = np.block([[0., Masked(1., True)],
[Masked(1, False), Masked(2, False)]])
expected = np.array([[0, 1.], [1, 2]])
expected_mask = np.array([[False, True], [False, False]])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_append(self):
out = np.append(self.ma, self.mc, axis=1)
expected = np.append(self.a, self.c, axis=1)
expected_mask = np.append(self.mask_a, self.mask_c, axis=1)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_insert(self):
obj = (1, 1)
values = Masked([50., 25.], mask=[True, False])
out = np.insert(self.ma.flatten(), obj, values)
expected = np.insert(self.a.flatten(), obj, [50., 25.])
expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.insert(self.a.flatten(), obj, values)
with pytest.raises(TypeError):
np.insert(self.ma.flatten(), Masked(obj), values)
class TestSplit:
@classmethod
def setup_class(self):
self.a = np.arange(54.).reshape(3, 3, 6)
self.mask_a = np.zeros(self.a.shape, dtype=bool)
self.mask_a[1, 1, 1] = True
self.mask_a[0, 1, 4] = True
self.mask_a[1, 2, 5] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
expected_mask = func(self.mask_a, *args, **kwargs)
assert len(out) == len(expected)
for o, x, xm in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, xm)
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestMethodLikes(MaskedArraySetup):
def check(self, function, *args, method=None, **kwargs):
if method is None:
method = function.__name__
o = function(self.ma, *args, **kwargs)
x = getattr(self.ma, method)(*args, **kwargs)
assert_masked_equal(o, x)
def test_amax(self):
self.check(np.amax, method='max')
def test_amin(self):
self.check(np.amin, method='min')
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
self.check(np.any)
def test_all(self):
self.check(np.all)
def test_sometrue(self):
self.check(np.sometrue, method='any')
def test_alltrue(self):
self.check(np.alltrue, method='all')
def test_prod(self):
self.check(np.prod)
def test_product(self):
self.check(np.product, method='prod')
def test_cumprod(self):
self.check(np.cumprod)
def test_cumproduct(self):
self.check(np.cumproduct, method='cumprod')
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_, method='round')
def test_around(self):
self.check(np.around, method='round')
def test_clip(self):
self.check(np.clip, 2., 4.)
self.check(np.clip, self.mb, self.mc)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
self.check(np.var)
class TestUfuncLike(InvariantMaskTestSetup):
def test_fix(self):
self.check(np.fix)
def test_angle(self):
a = np.array([1+0j, 0+1j, 1+1j, 0+0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.angle(ma)
expected = np.angle(ma.unmasked)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_i0(self):
self.check(np.i0)
def test_sinc(self):
self.check(np.sinc)
def test_where(self):
mask = [True, False, True]
out = np.where(mask, self.ma, 1000.)
expected = np.where(mask, self.a, 1000.)
expected_mask = np.where(mask, self.mask_a, False)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
mask2 = Masked(mask, [True, False, False])
out2 = np.where(mask2, self.ma, 1000.)
expected2 = np.where(mask, self.a, 1000.)
expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
assert_array_equal(out2.unmasked, expected2)
assert_array_equal(out2.mask, expected_mask2)
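        # I.e., where the condition itself is masked, the result is masked
        # as well, regardless of which branch would have been chosen.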
def test_where_single_arg(self):
m = Masked(np.arange(3), mask=[True, False, False])
out = np.where(m)
expected = m.nonzero()
assert isinstance(out, tuple) and len(out) == 1
assert_array_equal(out[0], expected[0])
def test_where_wrong_number_of_arg(self):
with pytest.raises(ValueError, match='either both or neither'):
np.where([True, False, False], self.a)
def test_choose(self):
a = np.array([0, 1]).reshape((2, 1))
result = np.choose(a, (self.ma, self.mb))
expected = np.choose(a, (self.a, self.b))
expected_mask = np.choose(a, (self.mask_a, self.mask_b))
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.choose(a, (self.ma, self.mb), out=out)
assert result2 is out
assert_array_equal(result2, result)
with pytest.raises(TypeError):
np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))
def test_choose_masked(self):
ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
out = ma.choose((self.ma, self.mb))
expected = np.choose(ma.filled(0), (self.a, self.b))
expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(ValueError):
ma.unmasked.choose((self.ma, self.mb))
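        # I.e., masked indices are filled with 0 so that choose can run, and
        # the corresponding output elements are masked afterwards.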
@pytest.mark.parametrize('default', [-1., np.ma.masked, Masked(-1, mask=True)])
def test_select(self, default):
a, mask_a, ma = self.a, self.mask_a, self.ma
out = np.select([a < 1.5, a > 3.5], [ma, ma+1], default=default)
expected = np.select([a < 1.5, a > 3.5], [a, a+1],
default=-1 if default is not np.ma.masked else 0)
expected_mask = np.select([a < 1.5, a > 3.5], [mask_a, mask_a],
default=getattr(default, 'mask', False))
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_real_if_close(self):
a = np.array([1+0j, 0+1j, 1+1j, 0+0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.real_if_close(ma)
expected = np.real_if_close(a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_tril(self):
self.check(np.tril)
def test_triu(self):
self.check(np.triu)
def test_unwrap(self):
self.check(np.unwrap)
def test_nan_to_num(self):
self.check(np.nan_to_num)
ma = Masked([np.nan, 1.], mask=[True, False])
o = np.nan_to_num(ma, copy=False)
assert_masked_equal(o, Masked([0., 1.], mask=[True, False]))
assert ma is o
class TestUfuncLikeTests:
@classmethod
def setup_class(self):
self.a = np.array([[-np.inf, +np.inf, np.nan, 3., 4.]]*2)
self.mask_a = np.array([[False]*5, [True]*4+[False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([[3.0001], [3.9999]])
self.mask_b = np.array([[True], [False]])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, func):
out = func(self.ma)
expected = func(self.a)
assert type(out) is MaskedNDArray
assert out.dtype.kind == 'b'
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
assert not np.may_share_memory(out.mask, self.mask_a)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
o = np.isreal(Masked([1. + 1j], mask=False))
assert not o.unmasked and not o.mask
o = np.isreal(Masked([1. + 1j], mask=True))
assert not o.unmasked and o.mask
def test_iscomplex(self):
self.check(np.iscomplex)
o = np.iscomplex(Masked([1. + 1j], mask=False))
assert o.unmasked and not o.mask
o = np.iscomplex(Masked([1. + 1j], mask=True))
assert o.unmasked and o.mask
def test_isclose(self):
out = np.isclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)
expected_mask = self.mask_a | self.mask_b
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_allclose(self):
out = np.allclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb,
atol=0.01)[self.mask_a | self.mask_b].all()
assert_array_equal(out, expected)
def test_array_equal(self):
assert not np.array_equal(self.ma, self.ma)
assert not np.array_equal(self.ma, self.a)
if not NUMPY_LT_1_19:
assert np.array_equal(self.ma, self.ma, equal_nan=True)
assert np.array_equal(self.ma, self.a, equal_nan=True)
assert not np.array_equal(self.ma, self.mb)
ma2 = self.ma.copy()
ma2.mask |= np.isnan(self.a)
assert np.array_equal(ma2, self.ma)
def test_array_equiv(self):
assert np.array_equiv(self.mb, self.mb)
assert np.array_equiv(self.mb, self.b)
assert not np.array_equiv(self.ma, self.mb)
assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))
class TestOuterLikeFunctions(MaskedArraySetup):
def test_outer(self):
result = np.outer(self.ma, self.mb)
expected_data = np.outer(self.a.ravel(), self.b.ravel())
expected_mask = np.logical_or.outer(self.mask_a.ravel(),
self.mask_b.ravel())
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.outer(self.ma, self.mb, out=out)
assert result2 is out
assert result2 is not result
assert_masked_equal(result2, result)
out2 = np.zeros_like(result.unmasked)
with pytest.raises(TypeError):
np.outer(self.ma, self.mb, out=out2)
def test_kron(self):
result = np.kron(self.ma, self.mb)
expected_data = np.kron(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a,
self.mask_b).reshape(result.shape)
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
class TestReductionLikeFunctions(MaskedArraySetup):
def test_average(self):
o = np.average(self.ma)
assert_masked_equal(o, self.ma.mean())
o = np.average(self.ma, weights=self.mb, axis=-1)
expected = np.average(self.a, weights=self.b, axis=-1)
expected_mask = (self.mask_a | self.mask_b).any(-1)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_trace(self):
o = np.trace(self.ma)
expected = np.trace(self.a)
expected_mask = np.trace(self.mask_a).astype(bool)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
@pytest.mark.parametrize('axis', [0, 1, None])
def test_count_nonzero(self, axis):
o = np.count_nonzero(self.ma, axis=axis)
expected = np.count_nonzero(self.ma.filled(0), axis=axis)
assert_array_equal(o, expected)
@pytest.mark.filterwarnings('ignore:all-nan')
class TestPartitionLikeFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(36.).reshape(6, 6)
self.mask_a = np.zeros_like(self.a, bool)
        # Deliberately mask the lower triangle (including the diagonal), so
        # that some rows and columns end up fully masked.
self.mask_a[np.tril_indices_from(self.a)] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, function, *args, **kwargs):
o = function(self.ma, *args, **kwargs)
nanfunc = getattr(np, 'nan'+function.__name__)
nanfilled = self.ma.filled(np.nan)
expected = nanfunc(nanfilled, *args, **kwargs)
assert_array_equal(o.filled(np.nan), expected)
assert_array_equal(o.mask, np.isnan(expected))
if not kwargs.get('axis', 1):
            # No need to test the out= argument for every axis; once is enough.
return
out = np.zeros_like(o)
o2 = function(self.ma, *args, out=out, **kwargs)
assert o2 is out
assert_masked_equal(o2, o)
with pytest.raises(TypeError):
function(self.ma, *args, out=np.zeros_like(expected), **kwargs)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_median(self, axis):
self.check(np.median, axis=axis)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_quantile(self, axis):
self.check(np.quantile, q=[0.25, 0.5], axis=axis)
def test_quantile_out_of_range(self):
with pytest.raises(ValueError, match='must be in the range'):
np.quantile(self.ma, q=1.5)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_percentile(self, axis):
self.check(np.percentile, q=50, axis=axis)
class TestIntDiffFunctions(MaskedArraySetup):
def test_diff(self):
out = np.diff(self.ma)
expected = np.diff(self.a)
expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_diff_prepend_append(self):
out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)
expected = np.diff(self.a, prepend=-1, append=1.)
mask = np.concatenate([np.ones((2, 1), bool),
self.mask_a,
np.zeros((2, 1), bool)], axis=-1)
expected_mask = mask[:, 1:] | mask[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_trapz(self):
ma = self.ma.copy()
ma.mask[1] = False
out = np.trapz(ma)
assert_array_equal(out.unmasked, np.trapz(self.a))
assert_array_equal(out.mask, np.array([True, False]))
def test_gradient(self):
out = np.gradient(self.ma)
expected = np.gradient(self.a)
expected_mask = [(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),
np.stack([
self.mask_a[:, 0] | self.mask_a[:, 1],
self.mask_a[:, 0] | self.mask_a[:, 2],
self.mask_a[:, 1] | self.mask_a[:, 2]], axis=-1)]
for o, x, m in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestSpaceFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(1., 7.).reshape(2, 3)
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([2.5, 10., 3.])
self.mask_b = np.array([False, True, False])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, *args, **kwargs):
out = function(self.ma, self.mb, 5)
expected = function(self.a, self.b, 5)
expected_mask = np.broadcast_to(self.mask_a | self.mask_b,
expected.shape).copy()
        # TODO: should the implementation ensure that the start point's mask
        # is determined just by the start point (as for geomspace in numpy 1.20)?
expected_mask[-1] = self.mask_b
if not NUMPY_LT_1_20 and function is np.geomspace:
expected_mask[0] = self.mask_a
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_linspace(self):
self.check(np.linspace, 5)
def test_logspace(self):
self.check(np.logspace, 10)
def test_geomspace(self):
self.check(np.geomspace, 5)
class TestInterpolationFunctions(MaskedArraySetup):
def test_interp(self):
xp = np.arange(5.)
fp = np.array([1., 5., 6., 19., 20.])
mask_fp = np.array([False, False, False, True, False])
mfp = Masked(fp, mask=mask_fp)
x = np.array([1.5, 17.])
mask_x = np.array([False, True])
mx = Masked(x, mask=mask_x)
out = np.interp(mx, xp, mfp)
expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_x)
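        # I.e., masked points in fp are dropped from the interpolation table
        # altogether, while the output mask simply follows the mask of x.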
def test_piecewise(self):
condlist = [self.a < 1, self.a >= 1]
out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.])
expected = np.piecewise(self.a, condlist, [-1, 1.])
expected_mask = np.piecewise(self.mask_a, condlist, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
condlist2 = [self.a < 1, self.a >= 3]
out2 = np.piecewise(self.ma, condlist2,
[Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.),
mask=~x.mask)])
expected = np.piecewise(self.a, condlist2, [-1, 1, 2])
expected_mask = np.piecewise(self.mask_a, condlist2,
[True, False, lambda x: ~x])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
with pytest.raises(ValueError, match='with 2 condition'):
np.piecewise(self.ma, condlist2, [])
def test_regression_12978(self):
"""Regression tests for https://github.com/astropy/astropy/pull/12978"""
# This case produced incorrect results
mask = [False, True, False]
x = np.array([1, 2, 3])
xp = Masked(np.array([1, 2, 3]), mask=mask)
fp = Masked(np.array([1, 2, 3]), mask=mask)
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
# This case raised a ValueError
xp = np.array([1, 3])
fp = Masked(np.array([1, 3]))
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
class TestBincount(MaskedArraySetup):
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
mask_i = np.array([True, False, False, True, False, False])
mi = Masked(i, mask=mask_i)
out = np.bincount(mi)
expected = np.bincount(i[~mask_i])
assert_array_equal(out, expected)
w = np.arange(len(i))
mask_w = np.array([True]+[False]*5)
mw = Masked(w, mask=mask_w)
out2 = np.bincount(i, mw)
expected = np.bincount(i, w)
expected_mask = np.array([False, True, False, False, False])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
out3 = np.bincount(mi, mw)
expected = np.bincount(i[~mask_i], w[~mask_i])
expected_mask = np.array([False, False, False, False, False])
assert_array_equal(out3.unmasked, expected)
assert_array_equal(out3.mask, expected_mask)
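# In short, a masked entry in the index array is skipped entirely, while a
# masked weight keeps its index but masks the affected bin.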
class TestSortFunctions(MaskedArraySetup):
def test_sort(self):
o = np.sort(self.ma)
expected = self.ma.copy()
expected.sort()
assert_masked_equal(o, expected)
def test_sort_complex(self):
ma = Masked(np.array([1+2j, 0+4j, 3+0j, -1-1j]),
mask=[True, False, False, False])
o = np.sort_complex(ma)
indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))
expected = ma[indx]
assert_masked_equal(o, expected)
def test_msort(self):
o = np.msort(self.ma)
expected = np.sort(self.ma, axis=0)
assert_masked_equal(o, expected)
def test_partition(self):
o = np.partition(self.ma, 1)
expected = self.ma.copy()
expected.partition(1)
assert_masked_equal(o, expected)
class TestStringFunctions:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(self):
self.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == '[— 1 2]'
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=', ')
assert out1 == '[—, 1, 2]'
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=', ', formatter={'all': hex})
assert out2 == '[———, 0x1, 0x2]'
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(self.ma, None, None, None, ', ', '',
np._NoValue, {'int': hex})
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=', ', formatter={'float': hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == 'MaskedNDArray([—, 1, 2])'
ma2 = self.ma.astype('f4')
out2 = np.array_repr(ma2)
assert out2 == 'MaskedNDArray([——, 1., 2.], dtype=float32)'
def test_array_str(self):
out = np.array_str(self.ma)
assert out == '[— 1 2]'
class TestBitFunctions:
@classmethod
def setup_class(self):
self.a = np.array([15, 255, 0], dtype='u1')
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.unpackbits(self.a).reshape(6, 4)
self.mask_b = np.array([False]*15 + [True, True] + [False]*7).reshape(6, 4)
self.mb = Masked(self.b, mask=self.mask_b)
@pytest.mark.parametrize('axis', [None, 1, 0])
def test_packbits(self, axis):
out = np.packbits(self.mb, axis=axis)
if axis is None:
expected = self.a
else:
expected = np.packbits(self.b, axis=axis)
expected_mask = np.packbits(self.mask_b, axis=axis) > 0
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_unpackbits(self):
out = np.unpackbits(self.ma)
mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
expected_mask = np.unpackbits(mask) > 0
assert_array_equal(out.unmasked, self.b.ravel())
assert_array_equal(out.mask, expected_mask)
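# In short: for packbits an output byte is masked if any of its input bits
# is masked; for unpackbits all eight output bits inherit the mask of their
# source byte.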
class TestIndexFunctions(MaskedArraySetup):
"""Does not seem much sense to support these..."""
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.ma, 3)
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.ma,), 3)
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.ma)
class TestDtypeFunctions(MaskedArraySetup):
def check(self, function, *args, **kwargs):
out = function(self.ma, *args, **kwargs)
expected = function(self.a, *args, **kwargs)
assert out == expected
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.a.dtype)
self.check(np.can_cast, 'f4')
def test_min_scalar_type(self):
out = np.min_scalar_type(self.ma[0, 0])
expected = np.min_scalar_type(self.a[0, 0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1., 4.)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10., 3., 4.])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestMemoryFunctions(MaskedArraySetup):
def test_shares_memory(self):
assert np.shares_memory(self.ma, self.ma.unmasked)
assert not np.shares_memory(self.ma, self.ma.mask)
def test_may_share_memory(self):
assert np.may_share_memory(self.ma, self.ma.unmasked)
assert not np.may_share_memory(self.ma, self.ma.mask)
class TestDatetimeFunctions:
# Could in principle support np.is_busday, np.busday_count, np.busday_offset.
@classmethod
def setup_class(self):
self.a = np.array(['2020-12-31', '2021-01-01', '2021-01-02'], dtype='M')
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([['2021-01-07'], ['2021-01-31']], dtype='M')
self.mask_b = np.array([[False], [True]])
self.mb = Masked(self.b, mask=self.mask_b)
def test_datetime_as_string(self):
out = np.datetime_as_string(self.ma)
expected = np.datetime_as_string(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.filterwarnings('ignore:all-nan')
class TestNaNFunctions:
def setup_class(self):
self.a = np.array([[np.nan, np.nan, 3.],
[4., 5., 6.]])
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True,
**kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
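        # I.e., the reference behaviour is to fill masked elements with NaN
        # and apply the corresponding nan* function; for the masked-result
        # cases, any NaN that remains (an all-masked slice) maps to a masked
        # output element.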
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
self.check(np.nanpercentile, q=50)
untested_functions = set()
if NUMPY_LT_1_20:
financial_functions = {f for f in all_wrapped_functions.values()
if f in np.lib.financial.__dict__.values()}
untested_functions |= financial_functions
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander
}
untested_functions |= poly_functions
# Get covered functions
tested_functions = set()
for cov_cls in list(filter(inspect.isclass, locals().values())):
for k, v in cov_cls.__dict__.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
tested_functions.add(all_wrapped_functions[f])
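# For example, a method named ``test_take`` marks ``np.take`` as covered;
# this name-based bookkeeping is why the comment in TestShapeManipulation
# warns against parametrizing those tests.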
def test_basic_testing_completeness():
assert all_wrapped == (tested_functions
| IGNORED_FUNCTIONS
| UNSUPPORTED_FUNCTIONS)
@pytest.mark.xfail(reason='coverage not completely set up yet')
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped == (tested_functions | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize('one, two', itertools.combinations(
(MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys())), 2))
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (MASKED_SAFE_FUNCTIONS |
UNSUPPORTED_FUNCTIONS |
set(APPLY_TO_BOTH_FUNCTIONS.keys()) |
set(DISPATCHED_FUNCTIONS.keys()))
assert all_wrapped == included_in_helpers
@pytest.mark.xfail(reason='coverage not completely set up yet')
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from textwrap import indent
from collections import OrderedDict
from .coordinate_helpers import CoordinateHelper
from .frame import RectangularFrame, RectangularFrame1D
from .coordinate_range import find_coordinate_range
class CoordinatesMap:
"""
A container for coordinate helpers that represents a coordinate system.
This object can be used to access coordinate helpers by index (like a list)
or by name (like a dictionary).
Parameters
----------
axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate map belongs to.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data.
coord_meta : dict, optional
A dictionary providing additional metadata. This should include the keys
``type``, ``wrap``, and ``unit``. Each of these should be a list with as
many items as the dimension of the coordinate system. The ``type``
entries should be one of ``longitude``, ``latitude``, or ``scalar``, the
``wrap`` entries should give, for the longitude, the angle at which the
coordinate wraps (and `None` otherwise), and the ``unit`` should give
the unit of the coordinates as :class:`~astropy.units.Unit` instances.
This can optionally also include a ``format_unit`` entry giving the
units to use for the tick labels (if not specified, this defaults to
``unit``).
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
previous_frame_path : `~matplotlib.path.Path`, optional
When changing the WCS of the axes, the frame instance will change but
we might want to keep re-using the same underlying matplotlib
`~matplotlib.path.Path` - in that case, this can be passed to this
keyword argument.
"""
def __init__(self, axes, transform=None, coord_meta=None,
frame_class=RectangularFrame, previous_frame_path=None):
self._axes = axes
self._transform = transform
self.frame = frame_class(axes, self._transform, path=previous_frame_path)
# Set up coordinates
self._coords = []
self._aliases = {}
visible_count = 0
for index in range(len(coord_meta['type'])):
# Extract coordinate metadata
coord_type = coord_meta['type'][index]
coord_wrap = coord_meta['wrap'][index]
coord_unit = coord_meta['unit'][index]
name = coord_meta['name'][index]
visible = True
if 'visible' in coord_meta:
visible = coord_meta['visible'][index]
format_unit = None
if 'format_unit' in coord_meta:
format_unit = coord_meta['format_unit'][index]
default_label = name[0] if isinstance(name, (tuple, list)) else name
if 'default_axis_label' in coord_meta:
default_label = coord_meta['default_axis_label'][index]
coord_index = None
if visible:
visible_count += 1
coord_index = visible_count - 1
self._coords.append(CoordinateHelper(parent_axes=axes,
parent_map=self,
transform=self._transform,
coord_index=coord_index,
coord_type=coord_type,
coord_wrap=coord_wrap,
coord_unit=coord_unit,
format_unit=format_unit,
frame=self.frame,
default_label=default_label))
# Set up aliases for coordinates
if isinstance(name, tuple):
for nm in name:
nm = nm.lower()
# Do not replace an alias already in the map if we have
# more than one alias for this axis.
if nm not in self._aliases:
self._aliases[nm] = index
else:
self._aliases[name.lower()] = index
def __getitem__(self, item):
if isinstance(item, str):
return self._coords[self._aliases[item.lower()]]
else:
return self._coords[item]
def __contains__(self, item):
if isinstance(item, str):
return item.lower() in self._aliases
else:
return 0 <= item < len(self._coords)
def set_visible(self, visibility):
raise NotImplementedError()
def __iter__(self):
yield from self._coords
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : { 'lines' | 'contours' }
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
for coord in self:
coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs)
def get_coord_range(self):
xmin, xmax = self._axes.get_xlim()
if isinstance(self.frame, RectangularFrame1D):
extent = [xmin, xmax]
else:
ymin, ymax = self._axes.get_ylim()
extent = [xmin, xmax, ymin, ymax]
return find_coordinate_range(self._transform,
extent,
[coord.coord_type for coord in self if coord.coord_index is not None],
[coord.coord_unit for coord in self if coord.coord_index is not None],
[coord.coord_wrap for coord in self if coord.coord_index is not None])
def _as_table(self):
# Import Table here to avoid importing the astropy.table package
# every time astropy.visualization.wcsaxes is imported.
from astropy.table import Table # noqa
rows = []
for icoord, coord in enumerate(self._coords):
aliases = [key for key, value in self._aliases.items() if value == icoord]
row = OrderedDict([('index', icoord), ('aliases', ' '.join(aliases)),
('type', coord.coord_type), ('unit', coord.coord_unit),
('wrap', coord.coord_wrap), ('format_unit', coord.get_format_unit()),
('visible', 'no' if coord.coord_index is None else 'yes')])
rows.append(row)
return Table(rows=rows)
def __repr__(self):
s = f'<CoordinatesMap with {len(self._coords)} world coordinates:\n\n'
table = indent(str(self._as_table()), ' ')
return s + table + '\n\n>'
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.utils.compat.optional_deps import HAS_PLT
if HAS_PLT:
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.visualization.units import quantity_support
def teardown_function(function):
plt.close('all')
@pytest.mark.skipif('not HAS_PLT')
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label='label')
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
plt.legend()
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format='svg')
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif('not HAS_PLT')
def test_units_errbarr():
pytest.importorskip("matplotlib")
plt.figure()
with quantity_support():
x = [1, 2, 3] * u.s
y = [1, 2, 3] * u.m
yerr = [3, 2, 1] * u.cm
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr)
assert ax.xaxis.get_units() == u.s
assert ax.yaxis.get_units() == u.m
@pytest.mark.skipif('not HAS_PLT')
def test_incompatible_units():
# NOTE: minversion check does not work properly for matplotlib dev.
try:
# https://github.com/matplotlib/matplotlib/pull/13005
from matplotlib.units import ConversionError
except ImportError:
err_type = u.UnitConversionError
else:
err_type = ConversionError
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(err_type):
plt.plot([105, 210, 315] * u.kg)
@pytest.mark.skipif('not HAS_PLT')
def test_quantity_subclass():
"""Check that subclasses are recognized.
This sadly is not done by matplotlib.units itself, though
there is a PR to change it:
https://github.com/matplotlib/matplotlib/pull/13536
"""
plt.figure()
with quantity_support():
plt.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
plt.scatter([105, 210, 315] * u.arcsec, [3050, 3025, 3010] * u.g)
plt.plot(Angle([105, 210, 315], u.arcsec), [3050, 3025, 3010] * u.g)
assert plt.gca().xaxis.get_units() == u.deg
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif('not HAS_PLT')
def test_nested():
with quantity_support():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
assert ax.xaxis.get_units() == u.deg
assert ax.yaxis.get_units() == u.kg
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.arcsec), [3, 4, 5] * u.pc)
assert ax.xaxis.get_units() == u.arcsec
assert ax.yaxis.get_units() == u.pc
@pytest.mark.skipif('not HAS_PLT')
def test_empty_hist():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist([1, 2, 3, 4] * u.mmag, bins=100)
# The second call results in an empty list being passed to the
# unit converter in matplotlib >= 3.1
ax.hist([] * u.mmag, bins=100)
@pytest.mark.skipif('not HAS_PLT')
def test_radian_formatter():
with quantity_support():
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3] * u.rad * np.pi)
fig.canvas.draw()
labels = [tl.get_text() for tl in ax.yaxis.get_ticklabels()]
assert labels == ['π/2', 'π', '3π/2', '2π', '5π/2', '3π', '7π/2']
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
pytest.importorskip('matplotlib') # noqa
import matplotlib.pyplot as plt
import matplotlib.dates
from contextlib import nullcontext
from erfa import ErfaWarning
from astropy.time import Time
from astropy.visualization.time import time_support
# Matplotlib 3.3 added a settable epoch for plot dates and changed the default
# from 0000-12-31 to 1970-01-01. This can be checked by the existence of
# get_epoch() in matplotlib.dates.
MPL_EPOCH_1970 = hasattr(matplotlib.dates, 'get_epoch')
# Since some of the examples below use times/dates in the future, we use the
# TAI time scale to avoid ERFA warnings about dubious years.
DEFAULT_SCALE = 'tai'
def get_ticklabels(axis):
axis.figure.canvas.draw()
return [x.get_text() for x in axis.get_ticklabels()]
def teardown_function(function):
plt.close('all')
# We first check that we get the expected labels for different time intervals
# for standard ISO formatting. This is a way to check both the locator and
# formatter code.
RANGE_CASES = [
# Interval of many years
(('2014-03-22T12:30:30.9', '2077-03-22T12:30:32.1'),
['2020-01-01',
'2040-01-01',
'2060-01-01']),
# Interval of a few years
(('2014-03-22T12:30:30.9', '2017-03-22T12:30:32.1'),
['2015-01-01',
'2016-01-01',
'2017-01-01']),
# Interval of just under a year
(('2014-03-22T12:30:30.9', '2015-01-22T12:30:32.1'),
['2014-05-01',
'2014-10-01']),
# Interval of a few months
(('2014-11-22T12:30:30.9', '2015-02-22T12:30:32.1'),
['2014-12-01',
'2015-01-01',
'2015-02-01']),
# Interval of just over a month
(('2014-03-22T12:30:30.9', '2014-04-23T12:30:32.1'),
['2014-04-01']),
# Interval of just under a month
(('2014-03-22T12:30:30.9', '2014-04-21T12:30:32.1'),
['2014-03-24',
'2014-04-03',
'2014-04-13']),
# Interval of just over an hour
(('2014-03-22T12:30:30.9', '2014-03-22T13:31:30.9'),
['2014-03-22T12:40:00.000',
'2014-03-22T13:00:00.000',
'2014-03-22T13:20:00.000']),
# Interval of just under an hour
(('2014-03-22T12:30:30.9', '2014-03-22T13:28:30.9'),
['2014-03-22T12:40:00.000',
'2014-03-22T13:00:00.000',
'2014-03-22T13:20:00.000']),
# Interval of a few minutes
(('2014-03-22T12:30:30.9', '2014-03-22T12:38:30.9'),
['2014-03-22T12:33:00.000',
'2014-03-22T12:36:00.000']),
# Interval of a few seconds
(('2014-03-22T12:30:30.9', '2014-03-22T12:30:40.9'),
['2014-03-22T12:30:33.000',
'2014-03-22T12:30:36.000',
'2014-03-22T12:30:39.000']),
# Interval of a couple of seconds
(('2014-03-22T12:30:30.9', '2014-03-22T12:30:32.1'),
['2014-03-22T12:30:31.000',
'2014-03-22T12:30:31.500',
'2014-03-22T12:30:32.000']),
# Interval of under a second
(('2014-03-22T12:30:30.89', '2014-03-22T12:30:31.19'),
['2014-03-22T12:30:30.900',
'2014-03-22T12:30:31.000',
'2014-03-22T12:30:31.100']),
]
@pytest.mark.parametrize(('interval', 'expected'), RANGE_CASES)
def test_formatter_locator(interval, expected):
# Check that the ticks and labels returned for the above cases are correct.
with time_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time(interval[0], scale=DEFAULT_SCALE),
Time(interval[1], scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == expected
FORMAT_CASES = [
('byear', ['2020', '2040', '2060']),
('byear_str', ['B2020.000', 'B2040.000', 'B2060.000']),
('cxcsec', ['1000000000', '1500000000', '2000000000', '2500000000']),
('decimalyear', ['2020', '2040', '2060']),
('fits', ['2020-01-01T00:00:00.000', '2040-01-01T00:00:00.000', '2060-01-01T00:00:00.000']),
('gps', ['1500000000', '2000000000', '2500000000', '3000000000']),
('iso', ['2020-01-01 00:00:00.000', '2040-01-01 00:00:00.000', '2060-01-01 00:00:00.000']),
('isot', ['2020-01-01T00:00:00.000', '2040-01-01T00:00:00.000', '2060-01-01T00:00:00.000']),
('jd', ['2458000', '2464000', '2470000', '2476000']),
('jyear', ['2020', '2040', '2060']),
('jyear_str', ['J2020.000', 'J2040.000', 'J2060.000']),
('mjd', ['60000', '66000', '72000', '78000']),
('plot_date', (['18000', '24000', '30000', '36000'] if MPL_EPOCH_1970 else
['738000', '744000', '750000', '756000'])),
('unix', ['1500000000', '2000000000', '2500000000', '3000000000']),
('yday', ['2020:001:00:00:00.000', '2040:001:00:00:00.000', '2060:001:00:00:00.000']),
]
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES)
def test_formats(format, expected):
# Check that the locators/formatters work fine for all time formats
with time_support(format=format, simplify=False):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Getting unix time and plot_date requires going through a scale for
# which ERFA emits a warning about the date being dubious
with pytest.warns(ErfaWarning) if format in ['unix', 'plot_date'] else nullcontext():
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == expected
assert ax.get_xlabel() == f'Time ({format})'
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES)
def test_auto_formats(format, expected):
# Check that the format/scale is taken from the first time used.
with time_support(simplify=False):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Getting unix time and plot_date requires going through a scale for
# which ERFA emits a warning about the date being dubious
with pytest.warns(ErfaWarning) if format in ['unix', 'plot_date'] else nullcontext():
ax.set_xlim(Time(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE), format=format),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == expected
assert ax.get_xlabel() == f'Time ({format})'
FORMAT_CASES_SIMPLIFY = [
('fits', ['2020-01-01', '2040-01-01', '2060-01-01']),
('iso', ['2020-01-01', '2040-01-01', '2060-01-01']),
('isot', ['2020-01-01', '2040-01-01', '2060-01-01']),
('yday', ['2020', '2040', '2060']),
]
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES_SIMPLIFY)
def test_formats_simplify(format, expected):
# Check the use of the simplify= option
with time_support(format=format, simplify=True):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == expected
def test_plot():
# Make sure that plot() works properly
with time_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
ax.plot(Time(['2015-03-22T12:30:30.9',
'2018-03-22T12:30:30.9',
'2021-03-22T12:30:30.9'], scale=DEFAULT_SCALE))
def test_nested():
with time_support(format='iso', simplify=False):
with time_support(format='yday', simplify=True):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == ['2020', '2040', '2060']
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),
Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))
assert get_ticklabels(ax.xaxis) == ['2020-01-01 00:00:00.000',
'2040-01-01 00:00:00.000',
'2060-01-01 00:00:00.000']
|
7ba5c68c2796901587d84802294b95cf022533bf9876fe6e02d4e9970e22a072 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from packaging.version import Version
import pytest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.contour import QuadContourSet
from astropy import units as u
from astropy.wcs import WCS
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcsapi import SlicedLowLevelWCS, HighLevelWCSWrapper
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.frame import (
EllipticalFrame, RectangularFrame, RectangularFrame1D)
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.visualization.wcsaxes.transforms import CurvedTransform
ft_version = Version(matplotlib.ft2font.__freetype_version__)
FREETYPE_261 = ft_version == Version("2.6.1")
# We cannot use matplotlib.checkdep_usetex() anymore, see
# https://github.com/matplotlib/matplotlib/issues/23244
TEX_UNAVAILABLE = True
MATPLOTLIB_DEV = Version(matplotlib.__version__).is_devrelease
def teardown_function(function):
plt.close('all')
def test_grid_regression(ignore_matplotlibrc):
# Regression test for a bug that meant that if the rc parameter
# axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc('axes', grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
def test_format_coord_regression(ignore_matplotlibrc, tmpdir):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmpdir.join('nothing').strpath)
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
TARGET_HEADER = fits.Header.fromstring("""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""", sep='\n')
@pytest.mark.parametrize('grid_type', ['lines', 'contours'])
def test_no_numpy_warnings(ignore_matplotlibrc, tmpdir, grid_type):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color='white', grid_type=grid_type)
# There should be no warnings raised if some pixels are outside WCS
# (since this is normal).
# BUT our own catch_warning was ignoring some warnings before, so now we
# have to catch it. Otherwise, the pytest filterwarnings=error
# setting in setup.cfg will fail this test.
# There are actually multiple warnings but they are all similar.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=r'.*converting a masked element to nan.*')
warnings.filterwarnings('ignore', message=r'.*No contour levels were found within the data range.*')
warnings.filterwarnings('ignore', message=r'.*np\.asscalar\(a\) is deprecated since NumPy v1\.16.*')
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN will be required.*')
fig.savefig(tmpdir.join('test.png').strpath)
def test_invalid_frame_overlay(ignore_matplotlibrc):
# Make sure a nice error is returned if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError) as exc:
ax.get_coords_overlay('banana')
assert exc.value.args[0] == 'Frame banana not found'
with pytest.raises(ValueError) as exc:
get_coord_meta('banana')
assert exc.value.args[0] == 'Unknown frame: banana'
def test_plot_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename('data/2MASS_k_header')
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223*u.deg, 0.26876217*u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, 'o', transform=ax.get_transform('galactic'))
def test_set_label_properties(ignore_matplotlibrc):
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel('Test x label', labelpad=2, color='red')
ax.set_ylabel('Test y label', labelpad=3, color='green')
assert ax.coords[0].axislabels.get_text() == 'Test x label'
assert ax.coords[0].axislabels.get_minpad('b') == 2
assert ax.coords[0].axislabels.get_color() == 'red'
assert ax.coords[1].axislabels.get_text() == 'Test y label'
assert ax.coords[1].axislabels.get_minpad('l') == 3
assert ax.coords[1].axislabels.get_color() == 'green'
assert ax.get_xlabel() == 'Test x label'
assert ax.get_ylabel() == 'Test y label'
GAL_HEADER = fits.Header.fromstring("""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""", sep='\n')
def test_slicing_warnings(ignore_matplotlibrc, tmpdir):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN.*')
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
plt.savefig(tmpdir.join('test.png').strpath)
# Angle case
wcs3d = WCS(GAL_HEADER)
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN.*')
plt.clf()
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 2))
plt.savefig(tmpdir.join('test.png').strpath)
def test_plt_xlabel_ylabel(tmpdir):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel('Galactic Longitude')
plt.ylabel('Galactic Latitude')
plt.savefig(tmpdir.join('test.png').strpath)
def test_grid_type_contours_transform(tmpdir):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {'type': ('scalar', 'scalar'),
'unit': (u.m, u.s),
'wrap': (None, None),
'name': ('x', 'y')}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8],
transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type='contours')
fig.savefig(tmpdir.join('test.png').strpath)
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmpdir):
# Regression test for a bug that caused a crash when grid was called and
# didn't produce grid lines (due e.g. to too large spacing) and was then
# called again.
filename = tmpdir.join('test.png').strpath
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
with pytest.warns(UserWarning, match='No contour levels were found within the data range'):
ax.contour(np.zeros((4, 4)), transform=ax.get_transform('world'))
def test_iterate_coords(ignore_matplotlibrc, tmpdir):
# Regression test for a bug that caused ax.coords to return too few axes
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
x, y, z = ax.coords
def test_invalid_slices_errors(ignore_matplotlibrc):
# Make sure that users get a clear message when specifying a WCS with
# >2 dimensions without giving the 'slices' argument, or if the 'slices'
# argument has too many/few elements.
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
with pytest.raises(ValueError) as exc:
plt.subplot(1, 1, 1, projection=wcs3d)
assert exc.value.args[0] == ("WCS has more than 2 pixel dimensions, so "
"'slices' should be set")
with pytest.raises(ValueError) as exc:
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1, 2))
assert exc.value.args[0] == ("'slices' should have as many elements as "
"WCS has pixel dimensions (should be 3)")
wcs2d = WCS(naxis=2)
wcs2d.wcs.ctype = ['x', 'y']
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d)
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('x', 'y'))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('y', 'x'))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=['x', 'y'])
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'x'))
assert ax.frame_class is RectangularFrame1D
wcs1d = WCS(naxis=1)
wcs1d.wcs.ctype = ['x']
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs1d)
assert ax.frame_class is RectangularFrame1D
with pytest.raises(ValueError):
plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'y'))
EXPECTED_REPR_1 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None no
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
EXPECTED_REPR_2 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None yes
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
def test_repr(ignore_matplotlibrc):
# Unit test to make sure __repr__ looks as expected
wcs3d = WCS(GAL_HEADER)
# Cube header has world coordinates as distance, lon, lat, so start off
# by slicing in a way that we select just lon,lat:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, 'x', 'y'))
assert repr(ax.coords) == EXPECTED_REPR_1
# Now slice in a way that all world coordinates are still present:
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
assert repr(ax.coords) == EXPECTED_REPR_2
@pytest.fixture
def time_spectral_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['FREQ', 'TIME']
wcs.wcs.set()
return wcs
def test_time_wcs(time_spectral_wcs_2d):
# Regression test for a bug that caused WCSAxes to error when using a WCS
# with a time axis.
plt.subplot(projection=time_spectral_wcs_2d)
@pytest.mark.skipif('TEX_UNAVAILABLE')
def test_simplify_labels_usetex(ignore_matplotlibrc, tmpdir):
"""Regression test for https://github.com/astropy/astropy/issues/8004."""
plt.rc('text', usetex=True)
header = {
'NAXIS': 2,
'NAXIS1': 360,
'NAXIS2': 180,
'CRPIX1': 180.5,
'CRPIX2': 90.5,
'CRVAL1': 180.0,
'CRVAL2': 0.0,
'CDELT1': -2 * np.sqrt(2) / np.pi,
'CDELT2': 2 * np.sqrt(2) / np.pi,
'CTYPE1': 'RA---MOL',
'CTYPE2': 'DEC--MOL',
'RADESYS': 'ICRS'}
wcs = WCS(header)
fig, ax = plt.subplots(
subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))
ax.set_xlim(-0.5, header['NAXIS1'] - 0.5)
ax.set_ylim(-0.5, header['NAXIS2'] - 0.5)
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[0].set_ticks(spacing=45 * u.deg)
ax.coords[1].set_ticks(spacing=30 * u.deg)
ax.grid()
fig.savefig(tmpdir / 'plot.png')
@pytest.mark.parametrize('frame_class', [RectangularFrame, EllipticalFrame])
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
"""Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a
WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435.
"""
labels = ['RA', 'Declination']
header = {
'NAXIS': 2,
'NAXIS1': 360,
'NAXIS2': 180,
'CRPIX1': 180.5,
'CRPIX2': 90.5,
'CRVAL1': 180.0,
'CRVAL2': 0.0,
'CDELT1': -2 * np.sqrt(2) / np.pi,
'CDELT2': 2 * np.sqrt(2) / np.pi,
'CTYPE1': 'RA---AIT',
'CTYPE2': 'DEC--AIT'}
wcs = WCS(header)
fig, ax = plt.subplots(
subplot_kw=dict(frame_class=frame_class, projection=wcs))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
assert ax.get_xlabel() == labels[0]
assert ax.get_ylabel() == labels[1]
for i in range(2):
assert ax.coords[i].get_axislabel() == labels[i]
@pytest.mark.parametrize('atol', [0.2, 1.0e-8])
def test_bbox_size(atol):
# Test for the size of a WCSAxes bbox (only have Matplotlib >= 3.0 now)
extents = [11.38888888888889, 3.5, 576.0, 432.0]
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
fig.canvas.draw()
renderer = fig.canvas.renderer
ax_bbox = ax.get_tightbbox(renderer)
# Enforce strict test only with reference Freetype version
if atol < 0.1 and not FREETYPE_261:
pytest.xfail("Exact BoundingBox dimensions are only ensured with FreeType 2.6.1")
assert np.allclose(ax_bbox.extents, extents, atol=atol)
def test_wcs_type_transform_regression():
wcs = WCS(TARGET_HEADER)
sliced_wcs = SlicedLowLevelWCS(wcs, np.s_[1:-1, 1:-1])
ax = plt.subplot(1, 1, 1, projection=wcs)
ax.get_transform(sliced_wcs)
high_wcs = HighLevelWCSWrapper(sliced_wcs)
ax.get_transform(high_wcs)
def test_multiple_draws_grid_contours(tmpdir):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
ax.grid(color='black', grid_type='contours')
fig.savefig(tmpdir / 'plot.png')
fig.savefig(tmpdir / 'plot.png')
|
402837800849df233960829ea11da410e632056ecdf618ee2f45597cb32ca99e | from astropy.nddata import NDData, NDIOMixin, NDDataRef
# Alias NDDataAllMixins in case this will be renamed ... :-)
NDDataIO = NDDataRef
def test_simple_write_read(tmpdir):
ndd = NDDataIO([1, 2, 3])
assert hasattr(ndd, 'read')
assert hasattr(ndd, 'write')
|
65ea7ca6cfb409109e804cd19e8678b7a6fdeb0714e03bf5d2bc30c73fc8dd4e | import os
import abc
import numpy as np
__all__ = ['BaseLowLevelWCS', 'validate_physical_types']
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the low-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
@property
@abc.abstractmethod
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
@property
@abc.abstractmethod
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
@property
@abc.abstractmethod
def world_axis_units(self):
"""
An iterable of strings giving the units of the world coordinates for each
axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
@abc.abstractmethod
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
input, and pixel coordinates should be zero-based. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
assumed to be 0 at the center of the first pixel in each dimension. If a
pixel is in a region where the WCS is not defined, NaN can be returned.
The coordinates should be specified in the ``(x, y)`` order, where for
an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
return self.pixel_to_world_values(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a world coordinate does not have a matching pixel
coordinate, NaN can be returned. The coordinates should be returned in
the ``(x, y)`` order, where for an image, ``x`` is the horizontal
coordinate and ``y`` is the vertical coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
pixel_arrays = self.world_to_pixel_values(*world_arrays)
if self.pixel_n_dim == 1:
pixel_arrays = (pixel_arrays,)
else:
pixel_arrays = pixel_arrays[::-1]
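# Round half-up to the nearest integer index via floor(pixel + 0.5).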
array_indices = tuple(np.asarray(np.floor(pixel + 0.5), dtype=np.int_) for pixel in pixel_arrays)
return array_indices[0] if self.pixel_n_dim == 1 else array_indices
@property
@abc.abstractmethod
def world_axis_object_components(self):
"""
A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
on constructing high-level objects for the world coordinates.
Each element of the list is a tuple with three items:
* The first is a name for the world object this world array
corresponds to, which *must* match the string names used in
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
appear twice because two world arrays might correspond to a single
world object (e.g. a celestial coordinate might have both “ra” and
“dec” arrays, which correspond to a single sky coordinate object).
* The second element is either a string keyword argument name or a
positional index for the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
* The third argument is a string giving the name of the property
to access on the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in
order to get numerical values. Alternatively, this argument can be a
callable Python object that takes a high-level coordinate object and
returns the numerical values suitable for passing to the low-level
WCS transformation methods.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
@property
@abc.abstractmethod
def world_axis_object_classes(self):
"""
A dictionary giving information on constructing high-level objects for
the world coordinates.
Each key of the dictionary is a string key from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
tuple with three elements or four elements:
* The first element of the tuple must be a class or a string specifying
the fully-qualified name of a class, which will specify the actual
Python object to be created.
* The second element should be a tuple specifying the positional
arguments required to initialize the class. If
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
world coordinates should be passed as a positional argument, then this
tuple should include `None` placeholders for the world coordinates.
* The third tuple element must be a dictionary with the keyword
arguments required to initialize the class.
* Optionally, for advanced use cases, the fourth element (if present)
should be a callable Python object that gets called instead of the
class and gets passed the positional and keyword arguments. It should
return an object of the type of the first element in the tuple.
Note that we don't require the classes to be Astropy classes since there
is no guarantee that Astropy will have all the classes to represent all
kinds of world coordinates. Furthermore, we recommend that the output be
kept as human-readable as possible.
The classes used here should have the ability to do conversions by
passing an instance as the first argument to the same class with
different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
a requirement for the implementation of the high-level interface.
The second and third tuple elements for each value of this dictionary
can in turn contain either instances of classes, or if necessary can
contain serialized versions that should take the same form as the main
classes described above (a tuple with three elements with the fully
qualified name of the class, then the positional arguments and the
keyword arguments). For low-level API objects implemented in Python, we
recommend simply returning the actual objects (not the serialized form)
for optimal performance. Implementations should either always or never
use serialized classes to represent Python objects, and should indicate
which of these they follow using the
`~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
# The following three properties have default fallback implementations, so
# they are not abstract.
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
order (the convention for arrays in Python).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
"""
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
order (where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
If you are interested in getting a shape that is comparable to that of
a Numpy array, you should use
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
"""
return None
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if a shape is not known or relevant.
"""
return None
@property
def pixel_axis_names(self):
"""
An iterable of strings describing the name for each pixel axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized.
"""
return [''] * self.pixel_n_dim
@property
def world_axis_names(self):
"""
An iterable of strings describing the name for each world axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized. For standardized axis types, see
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
"""
return [''] * self.world_n_dim
@property
def axis_correlation_matrix(self):
"""
Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that
indicates using booleans whether a given world coordinate depends on a
given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence
of any further information. For completely independent axes, the
diagonal would be `True` and all other entries `False`.
"""
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
def _as_mpl_axes(self):
"""
Compatibility hook for Matplotlib and WCSAxes. With this method, one can
do::
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from astropy.visualization.wcsaxes import WCSAxes
return WCSAxes, {'wcs': self}
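# A minimal sketch (illustrative only) of a concrete subclass for a single
# linear world axis; the name LinearWCS and the scale/offset parametrization
# are assumptions for the example, and ``u`` is ``astropy.units``:
#
#     class LinearWCS(BaseLowLevelWCS):
#         pixel_n_dim = world_n_dim = 1
#         world_axis_physical_types = ['em.wl']
#         world_axis_units = ['m']
#         world_axis_object_components = [('wavelength', 0, 'value')]
#         world_axis_object_classes = {
#             'wavelength': (u.Quantity, (), {'unit': u.m})}
#
#         def __init__(self, scale, offset):
#             self.scale, self.offset = scale, offset
#
#         def pixel_to_world_values(self, pixel):
#             return self.scale * np.asarray(pixel) + self.offset
#
#         def world_to_pixel_values(self, world):
#             return (np.asarray(world) - self.offset) / self.scale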
UCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')
with open(UCDS_FILE) as f:
VALID_UCDS = {x.strip() for x in f.read().splitlines()[1:]}
def validate_physical_types(physical_types):
"""
Validate a list of physical types against the UCD1+ standard
"""
for physical_type in physical_types:
if (physical_type is not None and
physical_type not in VALID_UCDS and
not physical_type.startswith('custom:')):
raise ValueError(
f"'{physical_type}' is not a valid IOVA UCD1+ physical type. "
"It must be a string specified in the list (http://www.ivoa.net/documents/latest/UCDlist.html) "
"or if no matching type exists it can be any string prepended with 'custom:'."
)
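# Illustrative usage sketch:
#
#     validate_physical_types(['em.wl', 'custom:my.quantity', None])  # passes
#     validate_physical_types(['not-a-ucd'])  # raises ValueError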
|
bf735d3e5c7fcb3418b3e0967bd8c1285775e898a5c1749eb3f6aee4b38d4727 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import numpy as np
__all__ = ['deserialize_class', 'wcs_info_str']
def deserialize_class(tpl, construct=True):
"""
Deserialize classes recursively.
"""
if not isinstance(tpl, tuple) or len(tpl) != 3:
raise ValueError("Expected a tuple of three values")
module, klass = tpl[0].rsplit('.', 1)
module = importlib.import_module(module)
klass = getattr(module, klass)
args = tuple(deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1])
kwargs = dict((key, deserialize_class(val)) if isinstance(val, tuple) else (key, val) for (key, val) in tpl[2].items())
if construct:
return klass(*args, **kwargs)
else:
return klass, args, kwargs
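# Illustrative sketch of the serialized form this function expects, a
# 3-tuple of (fully qualified class name, positional args, keyword args);
# the values are assumptions for the example:
#
#     tpl = ('astropy.units.Quantity', (1.,), {'unit': 'deg'})
#     deserialize_class(tpl)                   # -> <Quantity 1. deg>
#     deserialize_class(tpl, construct=False)  # -> (Quantity, (1.,), {'unit': 'deg'})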
def wcs_info_str(wcs):
# Overall header
s = f'{wcs.__class__.__name__} Transformation\n\n'
s += ('This transformation has {} pixel and {} world dimensions\n\n'
.format(wcs.pixel_n_dim, wcs.world_n_dim))
s += f'Array shape (Numpy order): {wcs.array_shape}\n\n'
# Pixel dimensions table
array_shape = wcs.array_shape or (0,)
pixel_shape = wcs.pixel_shape or (None,) * wcs.pixel_n_dim
# Find largest between header size and value length
pixel_dim_width = max(9, len(str(wcs.pixel_n_dim)))
pixel_nam_width = max(9, max(len(x) for x in wcs.pixel_axis_names))
pixel_siz_width = max(9, len(str(max(array_shape))))
s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +
'Bounds\n')
for ipix in range(wcs.pixel_n_dim):
s += (('{0:' + str(pixel_dim_width) + 'g}').format(ipix) + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format(wcs.pixel_axis_names[ipix] or 'None') + ' ' +
(" " * 5 + str(None) if pixel_shape[ipix] is None else
('{0:' + str(pixel_siz_width) + 'g}').format(pixel_shape[ipix])) + ' ' +
'{:s}'.format(str(None if wcs.pixel_bounds is None else wcs.pixel_bounds[ipix]) + '\n'))
s += '\n'
# World dimensions table
# Find largest between header size and value length
world_dim_width = max(9, len(str(wcs.world_n_dim)))
world_nam_width = max(9, max(len(x) if x is not None else 0 for x in wcs.world_axis_names))
world_typ_width = max(13, max(len(x) if x is not None else 0 for x in wcs.world_axis_physical_types))
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +
('{0:' + str(world_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +
'Units\n')
for iwrl in range(wcs.world_n_dim):
name = wcs.world_axis_names[iwrl] or 'None'
typ = wcs.world_axis_physical_types[iwrl] or 'None'
unit = wcs.world_axis_units[iwrl] or 'unknown'
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
('{0:' + str(world_nam_width) + 's}').format(name) + ' ' +
('{0:' + str(world_typ_width) + 's}').format(typ) + ' ' +
'{:s}'.format(unit + '\n'))
s += '\n'
# Axis correlation matrix
pixel_dim_width = max(3, len(str(wcs.world_n_dim)))
s += 'Correlation between pixel and world axes:\n\n'
s += (' ' * world_dim_width + ' ' +
('{0:^' + str(wcs.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +
'\n')
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +
''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
matrix = wcs.axis_correlation_matrix
matrix_str = np.empty(matrix.shape, dtype='U3')
matrix_str[matrix] = 'yes'
matrix_str[~matrix] = 'no'
for iwrl in range(wcs.world_n_dim):
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +
''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# Make sure we get rid of the extra whitespace at the end of some lines
return '\n'.join([l.rstrip() for l in s.splitlines()])
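# For orientation, the returned string is laid out roughly as follows
# (values illustrative):
#
#     WCS Transformation
#
#     This transformation has 2 pixel and 2 world dimensions
#
#     Array shape (Numpy order): (100, 200)
#
#     Pixel Dim  Axis Name  Data size  Bounds
#     ...
#
#     World Dim  Axis Name  Physical Type  Units
#     ...
#
#     Correlation between pixel and world axes: ...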
|
c1a891af6c441624e237d08deeb3bdd0d0595fb388c1d83fae3a4d806495fb71 | # This file includes the definition of a mix-in class that provides the low-
# and high-level WCS API to the astropy.wcs.WCS object. We keep this code
# isolated in this mix-in class to avoid making the main wcs.py file too
# long.
import warnings
import numpy as np
from astropy import units as u
from astropy.coordinates import SpectralCoord, Galactic, ICRS
from astropy.coordinates.spectral_coordinate import update_differentials_to_match, attach_zero_velocities
from astropy.utils.exceptions import AstropyUserWarning
from astropy.constants import c
from .low_level_api import BaseLowLevelWCS
from .high_level_api import HighLevelWCSMixin
from .wrappers import SlicedLowLevelWCS
__all__ = ['custom_ctype_to_ucd_mapping', 'SlicedFITSWCS', 'FITSWCSAPIMixin']
C_SI = c.si.value
VELOCITY_FRAMES = {
'GEOCENT': 'gcrs',
'BARYCENT': 'icrs',
'HELIOCENT': 'hcrs',
'LSRK': 'lsrk',
'LSRD': 'lsrd'
}
# The spectra velocity frames below are needed for FITS spectral WCS
# (see Greisen 06 table 12) but aren't yet defined as real
# astropy.coordinates frames, so we instead define them here as instances
# of existing coordinate frames with offset velocities. In future we should
# make these real frames so that users can more easily recognize these
# velocity frames when used in SpectralCoord.
# This frame is defined as a velocity of 220 km/s in the
# direction of l=90, b=0. The rotation velocity is defined
# in:
#
# Kerr and Lynden-Bell 1986, Review of galactic constants.
#
# NOTE: this may differ from the assumptions of galcen_v_sun
# in the Galactocentric frame - the value used here is
# the one adopted by the WCS standard for spectral
# transformations.
VELOCITY_FRAMES['GALACTOC'] = Galactic(u=0 * u.km, v=0 * u.km, w=0 * u.km,
U=0 * u.km / u.s, V=-220 * u.km / u.s, W=0 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian')
# This frame is defined as a velocity of 300 km/s in the
# direction of l=90, b=0. This is defined in:
#
# Transactions of the IAU Vol. XVI B Proceedings of the
# 16th General Assembly, Reports of Meetings of Commissions:
# Comptes Rendus Des Séances Des Commissions, Commission 28,
# p201.
#
# Note that these values differ from those used by CASA
# (308 km/s towards l=105, b=-7) but we use the above values
# since these are the ones defined in Greisen et al (2006).
VELOCITY_FRAMES['LOCALGRP'] = Galactic(u=0 * u.km, v=0 * u.km, w=0 * u.km,
U=0 * u.km / u.s, V=-300 * u.km / u.s, W=0 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian')
# This frame is defined as a velocity of 368 km/s in the
# direction of l=263.85, b=48.25. This is defined in:
#
# Bennett et al. (2003), First-Year Wilkinson Microwave
# Anisotropy Probe (WMAP) Observations: Preliminary Maps
# and Basic Results
#
# Note that in that paper, the dipole is expressed as a
# temperature (T=3.346 +/- 0.017mK)
VELOCITY_FRAMES['CMBDIPOL'] = Galactic(l=263.85 * u.deg, b=48.25 * u.deg, distance=0 * u.km,
radial_velocity=-(3.346e-3 / 2.725 * c).to(u.km/u.s))
# Mapping from CTYPE axis name to UCD1
CTYPE_TO_UCD1 = {
# Celestial coordinates
'RA': 'pos.eq.ra',
'DEC': 'pos.eq.dec',
'GLON': 'pos.galactic.lon',
'GLAT': 'pos.galactic.lat',
'ELON': 'pos.ecliptic.lon',
'ELAT': 'pos.ecliptic.lat',
'TLON': 'pos.bodyrc.lon',
'TLAT': 'pos.bodyrc.lat',
'HPLT': 'custom:pos.helioprojective.lat',
'HPLN': 'custom:pos.helioprojective.lon',
'HPRZ': 'custom:pos.helioprojective.z',
'HGLN': 'custom:pos.heliographic.stonyhurst.lon',
'HGLT': 'custom:pos.heliographic.stonyhurst.lat',
'CRLN': 'custom:pos.heliographic.carrington.lon',
'CRLT': 'custom:pos.heliographic.carrington.lat',
'SOLX': 'custom:pos.heliocentric.x',
'SOLY': 'custom:pos.heliocentric.y',
'SOLZ': 'custom:pos.heliocentric.z',
# Spectral coordinates (WCS paper 3)
'FREQ': 'em.freq', # Frequency
'ENER': 'em.energy', # Energy
'WAVN': 'em.wavenumber', # Wavenumber
'WAVE': 'em.wl', # Vacuum wavelength
'VRAD': 'spect.dopplerVeloc.radio', # Radio velocity
'VOPT': 'spect.dopplerVeloc.opt', # Optical velocity
'ZOPT': 'src.redshift', # Redshift
'AWAV': 'em.wl', # Air wavelength
'VELO': 'spect.dopplerVeloc', # Apparent radial velocity
'BETA': 'custom:spect.doplerVeloc.beta', # Beta factor (v/c)
'STOKES': 'phys.polarization.stokes', # STOKES parameters
# Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
'TIME': 'time',
'TAI': 'time',
'TT': 'time',
'TDT': 'time',
'ET': 'time',
'IAT': 'time',
'UT1': 'time',
'UTC': 'time',
'GMT': 'time',
'GPS': 'time',
'TCG': 'time',
'TCB': 'time',
'TDB': 'time',
'LOCAL': 'time',
# Distance coordinates
'DIST': 'pos.distance',
'DSUN': 'custom:pos.distance.sunToObserver'
# UT() and TT() are handled separately in world_axis_physical_types
}
# Keep a list of additional custom mappings that have been registered. This
# is kept as a list in case nested context managers are used
CTYPE_TO_UCD1_CUSTOM = []
class custom_ctype_to_ucd_mapping:
"""
A context manager that makes it possible to temporarily add new CTYPE to
UCD1+ mappings used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.
Parameters
----------
mapping : dict
A dictionary mapping a CTYPE value to a UCD1+ value
Examples
--------
Consider a WCS with the following CTYPE::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=1)
>>> wcs.wcs.ctype = ['SPAM']
By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,
but this can be overridden::
>>> wcs.world_axis_physical_types
[None]
>>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
... wcs.world_axis_physical_types
['food.spam']
"""
def __init__(self, mapping):
CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)
self.mapping = mapping
def __enter__(self):
pass
def __exit__(self, type, value, tb):
CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)
class SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):
pass
class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API
"""
@property
def pixel_n_dim(self):
return self.naxis
@property
def world_n_dim(self):
return len(self.wcs.ctype)
@property
def array_shape(self):
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@array_shape.setter
def array_shape(self, value):
if value is None:
self.pixel_shape = None
else:
self.pixel_shape = value[::-1]
@property
def pixel_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the "
"shape {}.".format(self.naxis, len(value)))
self._naxis = list(value)
@property
def pixel_bounds(self):
return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the number of "
"pixel bounds {}.".format(self.naxis, len(value)))
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
# TODO: need to support e.g. TT(TAI)
for ctype in self.wcs.ctype:
if ctype.upper().startswith(('UT(', 'TT(')):
types.append('time')
else:
ctype_name = ctype.split('-')[0]
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if ctype_name in custom_mapping:
types.append(custom_mapping[ctype_name])
break
else:
types.append(CTYPE_TO_UCD1.get(ctype_name.upper(), None))
return types
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ''
elif isinstance(unit, u.Unit):
unit = unit.to_string(format='vounit')
else:
try:
unit = u.Unit(unit).to_string(format='vounit')
except u.UnitsError:
unit = ''
units.append(unit)
return units
@property
def world_axis_names(self):
return list(self.wcs.cname)
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix would be given by whether or not the PC matrix is zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these can assume correlations because of spherical distortions. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
def pixel_to_world_values(self, *pixel_arrays):
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def world_to_pixel_values(self, *world_arrays):
# avoid circular import
from astropy.wcs.wcs import NoConvergence
try:
pixel = self.all_world2pix(*world_arrays, 0)
except NoConvergence as e:
warnings.warn(str(e))
# use best_solution contained in the exception and format the same
# way as all_world2pix does (using _array_converter)
pixel = self._array_converter(lambda *args: e.best_solution,
'input', *world_arrays, 0)
return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)
@property
def world_axis_object_components(self):
return self._get_components_and_classes()[0]
@property
def world_axis_object_classes(self):
return self._get_components_and_classes()[1]
@property
def serialized_classes(self):
return False
def _get_components_and_classes(self):
# The aim of this function is to return whatever is needed for
# world_axis_object_components and world_axis_object_classes. It's easier
# to figure it out in one go and then return the values and let the
# properties return part of it.
# Since this method might get called quite a few times, we need to cache
# it. We start off by defining a hash based on the attributes of the
# WCS that matter here (we can't just use the WCS object as a hash since
# it is mutable)
wcs_hash = (self.naxis,
list(self.wcs.ctype),
list(self.wcs.cunit),
self.wcs.radesys,
self.wcs.specsys,
self.wcs.equinox,
self.wcs.dateobs,
self.wcs.lng,
self.wcs.lat)
# If the cache is present, we need to check that the 'hash' matches.
if getattr(self, '_components_and_classes_cache', None) is not None:
cache = self._components_and_classes_cache
if cache[0] == wcs_hash:
return cache[1]
else:
self._components_and_classes_cache = None
# Avoid circular imports by importing here
from astropy.wcs.utils import wcs_to_celestial_frame
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.time import Time, TimeDelta
components = [None] * self.naxis
classes = {}
# Let's start off by checking whether the WCS has a pair of celestial
# components
if self.has_celestial:
try:
celestial_frame = wcs_to_celestial_frame(self)
except ValueError:
# Some WCSes, e.g. solar, can be recognized by WCSLIB as being
# celestial but we don't necessarily have frames for them.
celestial_frame = None
else:
kwargs = {}
kwargs['frame'] = celestial_frame
kwargs['unit'] = u.deg
classes['celestial'] = (SkyCoord, (), kwargs)
components[self.wcs.lng] = ('celestial', 0, 'spherical.lon.degree')
components[self.wcs.lat] = ('celestial', 1, 'spherical.lat.degree')
# Next, we check for spectral components
if self.has_spectral:
# Find index of spectral coordinate
ispec = self.wcs.spec
ctype = self.wcs.ctype[ispec][:4]
ctype = ctype.upper()
kwargs = {}
# Determine observer location and velocity
# TODO: determine how WCS standard would deal with observer on a
# spacecraft far from earth. For now assume the obsgeo parameters,
# if present, give the geocentric observer location.
if np.isnan(self.wcs.obsgeo[0]):
observer = None
else:
earth_location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
# Get the time scale from TIMESYS or fall back to 'utc'
tscale = self.wcs.timesys or 'utc'
if np.isnan(self.wcs.mjdavg):
obstime = Time(self.wcs.mjdobs, format='mjd', scale=tscale,
location=earth_location)
else:
obstime = Time(self.wcs.mjdavg, format='mjd', scale=tscale,
location=earth_location)
observer_location = SkyCoord(earth_location.get_itrs(obstime=obstime))
if self.wcs.specsys in VELOCITY_FRAMES:
frame = VELOCITY_FRAMES[self.wcs.specsys]
observer = observer_location.transform_to(frame)
if isinstance(frame, str):
observer = attach_zero_velocities(observer)
else:
observer = update_differentials_to_match(observer_location,
VELOCITY_FRAMES[self.wcs.specsys],
preserve_observer_frame=True)
elif self.wcs.specsys == 'TOPOCENT':
observer = attach_zero_velocities(observer_location)
else:
raise NotImplementedError(f'SPECSYS={self.wcs.specsys} not yet supported')
# Determine target
# This is trickier. In principle the target for each pixel is the
# celestial coordinates of the pixel, but we then need to be very
# careful about SSYSOBS which is tricky. For now, we set the
# target using the reference celestial coordinate in the WCS (if
# any).
if self.has_celestial and celestial_frame is not None:
# NOTE: celestial_frame was defined higher up
# NOTE: we set the distance explicitly to avoid warnings in SpectralCoord
target = SkyCoord(self.wcs.crval[self.wcs.lng] * self.wcs.cunit[self.wcs.lng],
self.wcs.crval[self.wcs.lat] * self.wcs.cunit[self.wcs.lat],
frame=celestial_frame,
distance=1000 * u.kpc)
target = attach_zero_velocities(target)
else:
target = None
# SpectralCoord does not work properly if either observer or target
# are not convertible to ICRS, so if this is the case, we (for now)
# drop the observer and target from the SpectralCoord and warn the
# user.
if observer is not None:
try:
observer.transform_to(ICRS())
except Exception:
warnings.warn('observer cannot be converted to ICRS, so will '
'not be set on SpectralCoord', AstropyUserWarning)
observer = None
if target is not None:
try:
target.transform_to(ICRS())
except Exception:
warnings.warn('target cannot be converted to ICRS, so will '
'not be set on SpectralCoord', AstropyUserWarning)
target = None
# NOTE: below we include Quantity in classes['spectral'] instead
# of SpectralCoord - this is because we want to also be able to
# accept plain quantities.
if ctype == 'ZOPT':
def spectralcoord_from_redshift(redshift):
if isinstance(redshift, SpectralCoord):
return redshift
return SpectralCoord((redshift + 1) * self.wcs.restwav,
unit=u.m, observer=observer, target=target)
def redshift_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (observer is None
or spectralcoord.observer is None
or spectralcoord.target is None):
if observer is None:
msg = 'No observer defined on WCS'
elif spectralcoord.observer is None:
msg = 'No observer defined on SpectralCoord'
else:
msg = 'No target defined on SpectralCoord'
warnings.warn(f'{msg}, SpectralCoord '
'will be converted without any velocity '
'frame change', AstropyUserWarning)
return spectralcoord.to_value(u.m) / self.wcs.restwav - 1.
else:
return spectralcoord.with_observer_stationary_relative_to(observer).to_value(u.m) / self.wcs.restwav - 1.
classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_redshift)
components[self.wcs.spec] = ('spectral', 0, redshift_from_spectralcoord)
elif ctype == 'BETA':
def spectralcoord_from_beta(beta):
if isinstance(beta, SpectralCoord):
return beta
return SpectralCoord(beta * C_SI,
unit=u.m / u.s,
doppler_convention='relativistic',
doppler_rest=self.wcs.restwav * u.m,
observer=observer, target=target)
def beta_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
doppler_equiv = u.doppler_relativistic(self.wcs.restwav * u.m)
if (observer is None
or spectralcoord.observer is None
or spectralcoord.target is None):
if observer is None:
msg = 'No observer defined on WCS'
elif spectralcoord.observer is None:
msg = 'No observer defined on SpectralCoord'
else:
msg = 'No target defined on SpectralCoord'
warnings.warn(f'{msg}, SpectralCoord '
'will be converted without any velocity '
'frame change', AstropyUserWarning)
return spectralcoord.to_value(u.m / u.s, doppler_equiv) / C_SI
else:
return spectralcoord.with_observer_stationary_relative_to(observer).to_value(u.m / u.s, doppler_equiv) / C_SI
classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_beta)
components[self.wcs.spec] = ('spectral', 0, beta_from_spectralcoord)
else:
kwargs['unit'] = self.wcs.cunit[ispec]
if self.wcs.restfrq > 0:
if ctype == 'VELO':
kwargs['doppler_convention'] = 'relativistic'
kwargs['doppler_rest'] = self.wcs.restfrq * u.Hz
elif ctype == 'VRAD':
kwargs['doppler_convention'] = 'radio'
kwargs['doppler_rest'] = self.wcs.restfrq * u.Hz
elif ctype == 'VOPT':
kwargs['doppler_convention'] = 'optical'
kwargs['doppler_rest'] = self.wcs.restwav * u.m
def spectralcoord_from_value(value):
if isinstance(value, SpectralCoord):
return value
return SpectralCoord(value, observer=observer, target=target, **kwargs)
def value_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (observer is None
or spectralcoord.observer is None
or spectralcoord.target is None):
if observer is None:
msg = 'No observer defined on WCS'
elif spectralcoord.observer is None:
msg = 'No observer defined on SpectralCoord'
else:
msg = 'No target defined on SpectralCoord'
warnings.warn(f'{msg}, SpectralCoord '
'will be converted without any velocity '
'frame change', AstropyUserWarning)
return spectralcoord.to_value(**kwargs)
else:
return spectralcoord.with_observer_stationary_relative_to(observer).to_value(**kwargs)
classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_value)
components[self.wcs.spec] = ('spectral', 0, value_from_spectralcoord)
# We can then make sure we correctly return Time objects where appropriate
# (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
if 'time' in self.world_axis_physical_types:
multiple_time = self.world_axis_physical_types.count('time') > 1
for i in range(self.naxis):
if self.world_axis_physical_types[i] == 'time':
if multiple_time:
name = f'time.{i}'
else:
name = 'time'
# Initialize delta
reference_time_delta = None
# Extract time scale
scale = self.wcs.ctype[i].lower()
if scale == 'time':
if self.wcs.timesys:
scale = self.wcs.timesys.lower()
else:
scale = 'utc'
# Drop sub-scales
if '(' in scale:
pos = scale.index('(')
scale, subscale = scale[:pos], scale[pos+1:-1]
warnings.warn(f'Dropping unsupported sub-scale '
f'{subscale.upper()} from scale {scale.upper()}',
UserWarning)
# TODO: consider having GPS as a scale in Time
# For now GPS is not a scale, we approximate this by TAI - 19s
if scale == 'gps':
reference_time_delta = TimeDelta(19, format='sec')
scale = 'tai'
elif scale.upper() in FITS_DEPRECATED_SCALES:
scale = FITS_DEPRECATED_SCALES[scale.upper()]
elif scale not in Time.SCALES:
raise ValueError(f'Unrecognized time CTYPE={self.wcs.ctype[i]}')
# Determine location
trefpos = self.wcs.trefpos.lower()
if trefpos.startswith('topocent'):
# Note that some headers use TOPOCENT instead of TOPOCENTER
if np.any(np.isnan(self.wcs.obsgeo[:3])):
warnings.warn('Missing or incomplete observer location '
'information, setting location in Time to None',
UserWarning)
location = None
else:
location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
elif trefpos == 'geocenter':
location = EarthLocation(0, 0, 0, unit=u.m)
elif trefpos == '':
location = None
else:
# TODO: implement support for more locations when Time supports it
warnings.warn(f"Observation location '{trefpos}' is not "
"supported, setting location in Time to None", UserWarning)
location = None
reference_time = Time(np.nan_to_num(self.wcs.mjdref[0]),
np.nan_to_num(self.wcs.mjdref[1]),
format='mjd', scale=scale,
location=location)
if reference_time_delta is not None:
reference_time = reference_time + reference_time_delta
def time_from_reference_and_offset(offset):
if isinstance(offset, Time):
return offset
return reference_time + TimeDelta(offset, format='sec')
def offset_from_time_and_reference(time):
return (time - reference_time).sec
classes[name] = (Time, (), {}, time_from_reference_and_offset)
components[i] = (name, 0, offset_from_time_and_reference)
# Fallback: for any remaining components that haven't been identified, just
# return Quantity as the class to use
for i in range(self.naxis):
if components[i] is None:
name = self.wcs.ctype[i].split('-')[0].lower()
if name == '':
name = 'world'
while name in classes:
name += "_"
classes[name] = (u.Quantity, (), {'unit': self.wcs.cunit[i]})
components[i] = (name, 0, 'value')
# Keep a cached version of result
self._components_and_classes_cache = wcs_hash, (components, classes)
return components, classes
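# A minimal sketch (not part of astropy's public API) of the time-axis
# convention implemented above: world values along a FITS 'time' axis are
# offsets in seconds from the MJDREF reference epoch, so conversion in both
# directions is a plain TimeDelta addition or subtraction.
def _example_time_axis_offset_roundtrip():
    from astropy.time import Time, TimeDelta
    reference_time = Time(58569.0, format='mjd', scale='utc')  # hypothetical MJDREF
    offset = 120.0  # world value: seconds past the reference epoch
    t = reference_time + TimeDelta(offset, format='sec')
    # Recover the original world value from the Time object:
    assert abs((t - reference_time).sec - offset) < 1e-6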
|
e029a7fcd54265dec9d892047e844b6a34e1459148454fca12f47bae21000d2e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from contextlib import nullcontext
import pytest
from packaging.version import Version
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
from astropy.utils.data import get_pkg_data_contents, get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.time import Time
from astropy import units as u
from astropy.utils import unbroadcast
from astropy.coordinates import SkyCoord, EarthLocation, ITRS
from astropy.units import Quantity
from astropy.io import fits
from astropy.wcs import _wcs # noqa
from astropy.wcs.wcs import (WCS, Sip, WCSSUB_LONGITUDE, WCSSUB_LATITUDE,
FITSFixedWarning)
from astropy.wcs.wcsapi.fitswcs import SlicedFITSWCS
from astropy.wcs.utils import (proj_plane_pixel_scales,
is_proj_plane_distorted,
non_celestial_pixel_scales,
wcs_to_celestial_frame,
celestial_frame_to_wcs, skycoord_to_pixel,
pixel_to_skycoord, custom_wcs_to_frame_mappings,
custom_frame_to_wcs_mappings,
add_stokes_axis_to_wcs,
pixel_to_pixel,
_split_matrix,
_pixel_to_pixel_correlation_matrix,
_pixel_to_world_correlation_matrix,
local_partial_pixel_derivatives,
fit_wcs_from_points,
obsgeo_to_frame)
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def test_wcs_dropping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
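# A minimal sketch (hypothetical helper, not astropy's implementation) of the
# matrix bookkeeping exercised above: dropping WCS axis ``k`` deletes row k
# and column k of the PC (or CD) matrix, so the reduced diagonal is the
# original diagonal with entry k removed, e.g.
# _example_drop_axis_matrix(np.diag([1., 2., 3., 4.]), 0).diagonal() -> [2., 3., 4.]
def _example_drop_axis_matrix(pc, k):
    keep = [i for i in range(pc.shape[0]) if i != k]
    return pc[np.ix_(keep, keep)]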
def test_wcs_swapping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
@pytest.mark.parametrize('ndim', (2, 3))
def test_add_stokes(ndim):
wcs = WCS(naxis=ndim)
for ii in range(ndim + 1):
outwcs = add_stokes_axis_to_wcs(wcs, ii)
assert outwcs.wcs.naxis == ndim + 1
assert outwcs.wcs.ctype[ii] == 'STOKES'
assert outwcs.wcs.cname[ii] == 'STOKES'
def test_slice():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
pscale = 0.1 # from cdelt
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
assert np.all(slice_wcs.wcs.crpix == np.array([1, 0]))
assert slice_wcs._naxis == [1000, 499]
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.wcs_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
assert slice_wcs._naxis == [250, 250]
slice_wcs = mywcs.slice([slice(None, None, 2), slice(0, None, 2)])
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.2]))
assert slice_wcs._naxis == [500, 250]
# Non-integral values do not alter the naxis attribute
with pytest.warns(AstropyUserWarning):
slice_wcs = mywcs.slice([slice(50.), slice(20.)])
assert slice_wcs._naxis == [1000, 500]
with pytest.warns(AstropyUserWarning):
slice_wcs = mywcs.slice([slice(50.), slice(20)])
assert slice_wcs._naxis == [20, 500]
with pytest.warns(AstropyUserWarning):
slice_wcs = mywcs.slice([slice(50), slice(20.5)])
assert slice_wcs._naxis == [1000, 50]
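# A minimal sketch (formula inferred from the assertions above, not quoted
# from astropy) of the CRPIX/CDELT algebra that slicing exercises: for a 1-D
# slice with start ``s`` and step ``k``, FITS pixel centres transform as
# crpix' = (crpix - s - 0.5) / k + 0.5 and the increment as cdelt' = cdelt * k.
def _example_slice_crpix_algebra():
    def sliced_crpix(crpix, start, step):
        return (crpix - start - 0.5) / step + 0.5
    # Values from test_slice: x-axis slice(0, None, 4), y-axis slice(1, None, 2)
    assert sliced_crpix(1.0, 0, 4) == 0.625  # slice_wcs.wcs.crpix[0]
    assert sliced_crpix(1.0, 1, 2) == 0.25   # slice_wcs.wcs.crpix[1]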
def test_slice_with_sip():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
mywcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
mywcs.sip = Sip(a, b, None, None, mywcs.wcs.crpix)
mywcs.wcs.set()
pscale = 0.1 # from cdelt
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
def test_slice_getitem():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
mywcs.wcs.crpix = [2, 2]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.875, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
# Default: numpy order
slice_wcs = mywcs[1::2]
assert np.all(slice_wcs.wcs.crpix == np.array([2, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.1, 0.2]))
def test_slice_fitsorder():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0, 1]))
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 0.625]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.4]))
slice_wcs = mywcs.slice([slice(1, None, 2)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 1]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.1]))
def test_slice_wcs():
mywcs = WCS(naxis=2)
sub = mywcs[0]
assert isinstance(sub, SlicedFITSWCS)
with pytest.raises(IndexError) as exc:
mywcs[0, ::2]
assert exc.value.args[0] == "Slicing WCS with a step is not supported."
def test_axis_names():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT-LSR', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
mywcs.wcs.cname = ['RA', 'DEC', 'VOPT', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
def test_celestial():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT', 'STOKES']
cel = mywcs.celestial
assert tuple(cel.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert cel.axis_type_names == ['RA', 'DEC']
def test_wcs_to_celestial_frame():
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates.builtin_frames import ICRS, ITRS, FK5, FK4, Galactic
mywcs = WCS(naxis=2)
mywcs.wcs.set()
with pytest.raises(ValueError, match="Could not determine celestial frame "
"corresponding to the specified WCS object"):
assert wcs_to_celestial_frame(mywcs) is None
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
mywcs.wcs.set()
with pytest.raises(ValueError):
assert wcs_to_celestial_frame(mywcs) is None
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.equinox = 1987.
mywcs.wcs.set()
print(mywcs.to_header())
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK5)
assert frame.equinox == Time(1987., format='jyear')
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.equinox = 1982
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK4)
assert frame.equinox == Time(1982., format='byear')
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['GLON-SIN', 'GLAT-SIN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, Galactic)
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['TLON-CAR', 'TLAT-CAR']
mywcs.wcs.dateobs = '2017-08-17T12:41:04.430'
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ITRS)
assert frame.obstime == Time('2017-08-17T12:41:04.430')
for equinox in [np.nan, 1987, 1982]:
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.radesys = 'ICRS'
mywcs.wcs.equinox = equinox
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# Flipped order
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['DEC--TAN', 'RA---TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# More than two dimensions
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = ['DEC--TAN', 'VELOCITY', 'RA---TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = ['GLAT-CAR', 'VELOCITY', 'GLON-CAR']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, Galactic)
def test_wcs_to_celestial_frame_correlated():
# Regression test for a bug that caused wcs_to_celestial_frame to fail when
# the celestial axes were correlated with other axes.
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates.builtin_frames import ICRS
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'
mywcs.wcs.cd = np.ones((3, 3))
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
def test_wcs_to_celestial_frame_extend():
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
mywcs.wcs.set()
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
class OffsetFrame:
pass
def identify_offset(wcs):
if wcs.wcs.ctype[0].endswith('OFFSET') and wcs.wcs.ctype[1].endswith('OFFSET'):
return OffsetFrame()
with custom_wcs_to_frame_mappings(identify_offset):
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, OffsetFrame)
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
def test_celestial_frame_to_wcs():
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import ICRS, ITRS, FK5, FK4, FK4NoETerms, Galactic, BaseCoordinateFrame
class FakeFrame(BaseCoordinateFrame):
pass
frame = FakeFrame()
with pytest.raises(ValueError) as exc:
celestial_frame_to_wcs(frame)
assert exc.value.args[0] == ("Could not determine WCS corresponding to "
"the specified coordinate frame.")
frame = ICRS()
mywcs = celestial_frame_to_wcs(frame)
mywcs.wcs.set()
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'ICRS'
assert np.isnan(mywcs.wcs.equinox)
assert mywcs.wcs.lonpole == 180
assert mywcs.wcs.latpole == 0
frame = FK5(equinox='J1987')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK5'
assert mywcs.wcs.equinox == 1987.
frame = FK4(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4'
assert mywcs.wcs.equinox == 1982.
frame = FK4NoETerms(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4-NO-E'
assert mywcs.wcs.equinox == 1982.
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('GLON-CAR', 'GLAT-CAR')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
mywcs.wcs.crval = [100, -30]
mywcs.wcs.set()
assert_allclose((mywcs.wcs.lonpole, mywcs.wcs.latpole), (180, 60))
frame = ITRS(obstime=Time('2017-08-17T12:41:04.43'))
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == '2017-08-17T12:41:04.430'
frame = ITRS()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == Time('J2000').utc.fits
def test_celestial_frame_to_wcs_extend():
class OffsetFrame:
pass
frame = OffsetFrame()
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def identify_offset(frame, projection=None):
if isinstance(frame, OffsetFrame):
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
return wcs
with custom_frame_to_wcs_mappings(identify_offset):
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('XOFFSET', 'YOFFSET')
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def test_pixscale_nodrop():
mywcs = WCS(naxis=2)
mywcs.wcs.cdelt = [0.1, 0.2]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2]
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
def test_pixscale_withdrop():
mywcs = WCS(naxis=3)
mywcs.wcs.cdelt = [0.1, 0.2, 1]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT']
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2, 1]
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
def test_pixscale_cd():
mywcs = WCS(naxis=2)
mywcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_cd_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cd = [[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_pc_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cdelt = [-scale, scale]
mywcs.wcs.pc = [[np.cos(rho), -np.sin(rho)],
[np.sin(rho), np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
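# A minimal sketch of the quantity the two rotation tests above rely on: the
# per-axis scales reported by proj_plane_pixel_scales are (to a good
# approximation) the column norms of the pixel scale matrix, which for a pure
# rotation scaled by ``scale`` equal ``scale`` on every axis.
def _example_pixel_scales_from_column_norms():
    rho = np.radians(30)
    scale = 0.1
    cd = np.array([[scale * np.cos(rho), -scale * np.sin(rho)],
                   [scale * np.sin(rho), scale * np.cos(rho)]])
    scales = np.sqrt((cd ** 2).sum(axis=0))  # Euclidean norm of each column
    assert_almost_equal(scales, (0.1, 0.1))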
@pytest.mark.parametrize(('cdelt', 'pc', 'pccd'),
(([0.1, 0.2], np.eye(2), np.diag([0.1, 0.2])),
([0.1, 0.2, 0.3], np.eye(3), np.diag([0.1, 0.2, 0.3])),
([1, 1, 1], np.diag([0.1, 0.2, 0.3]), np.diag([0.1, 0.2, 0.3]))))
def test_pixel_scale_matrix(cdelt, pc, pccd):
mywcs = WCS(naxis=(len(cdelt)))
mywcs.wcs.cdelt = cdelt
mywcs.wcs.pc = pc
assert_almost_equal(mywcs.pixel_scale_matrix, pccd)
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], False),
(['RA---TAN', 'FREQ'], False),))
def test_is_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.is_celestial == cel
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], True),
(['RA---TAN', 'FREQ'], False),))
def test_has_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.has_celestial == cel
def test_has_celestial_correlated():
# Regression test for astropy/astropy#8416 - has_celestial failed when
# celestial axes were correlated with other axes.
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'
mywcs.wcs.cd = np.ones((3, 3))
mywcs.wcs.set()
assert mywcs.has_celestial
@pytest.mark.parametrize(('cdelt', 'pc', 'cd', 'check_warning'),
((np.array([0.1, 0.2]), np.eye(2), np.eye(2), True),
(np.array([1, 1]), np.diag([0.1, 0.2]), np.eye(2), True),
(np.array([0.1, 0.2]), np.eye(2), None, False),
(np.array([0.1, 0.2]), None, np.eye(2), True),
))
def test_noncelestial_scale(cdelt, pc, cd, check_warning):
mywcs = WCS(naxis=2)
if cd is not None:
mywcs.wcs.cd = cd
if pc is not None:
mywcs.wcs.pc = pc
# TODO: Some inputs emit RuntimeWarning from here onwards.
# Fix the test data. See @nden's comment in PR 9010.
if check_warning:
ctx = pytest.warns()
else:
ctx = nullcontext()
with ctx as warning_lines:
mywcs.wcs.cdelt = cdelt
if check_warning:
for w in warning_lines:
assert issubclass(w.category, RuntimeWarning)
assert 'cdelt will be ignored since cd is present' in str(w.message)
mywcs.wcs.ctype = ['RA---TAN', 'FREQ']
ps = non_celestial_pixel_scales(mywcs)
assert_almost_equal(ps.to_value(u.deg), np.array([0.1, 0.2]))
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel(mode):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord
header = get_pkg_data_contents('data/maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# Make sure you can specify a different class using ``cls`` keyword
class SkyCoord2(SkyCoord):
pass
new2 = pixel_to_skycoord(xp, yp, wcs, mode=mode,
cls=SkyCoord2).transform_to('icrs')
assert new2.__class__ is SkyCoord2
assert_allclose(new2.ra.degree, ref.ra.degree)
assert_allclose(new2.dec.degree, ref.dec.degree)
def test_skycoord_to_pixel_swapped():
# Regression test for a bug that caused skycoord_to_pixel and
# pixel_to_skycoord to not work correctly if the axes were swapped in the
# WCS.
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord
header = get_pkg_data_contents('data/maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
wcs_swapped = wcs.sub([WCSSUB_LATITUDE, WCSSUB_LONGITUDE])
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp1, yp1 = skycoord_to_pixel(ref, wcs)
xp2, yp2 = skycoord_to_pixel(ref, wcs_swapped)
assert_allclose(xp1, xp2)
assert_allclose(yp1, yp2)
# WCS is in FK5 so we need to transform back to ICRS
new1 = pixel_to_skycoord(xp1, yp1, wcs).transform_to('icrs')
new2 = pixel_to_skycoord(xp1, yp1, wcs_swapped).transform_to('icrs')
assert_allclose(new1.ra.degree, new2.ra.degree)
assert_allclose(new1.dec.degree, new2.dec.degree)
def test_is_proj_plane_distorted():
# non-orthogonal CD:
wcs = WCS(naxis=2)
wcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    assert is_proj_plane_distorted(wcs)
# almost orthogonal CD:
wcs.wcs.cd = [[0.1 + 2.0e-7, 1.7e-7], [1.2e-7, 0.1 - 1.3e-7]]
    assert not is_proj_plane_distorted(wcs)
# real case:
header = get_pkg_data_filename('data/sip.fits')
with pytest.warns(FITSFixedWarning):
wcs = WCS(header)
    assert is_proj_plane_distorted(wcs)
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel_distortions(mode):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord
header = get_pkg_data_filename('data/sip.fits')
with pytest.warns(FITSFixedWarning):
wcs = WCS(header)
ref = SkyCoord(202.50 * u.deg, 47.19 * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
@pytest.fixture
def spatial_wcs_2d_small_angle():
"""
This WCS has an almost linear correlation between the pixel and world axes
close to the reference pixel.
"""
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [0.002] * 2
wcs.wcs.crval = [0] * 2
wcs.wcs.set()
return wcs
def test_local_pixel_derivatives(spatial_wcs_2d_small_angle):
not_diag = np.logical_not(np.diag([1, 1]))
# At (or close to) the reference pixel this should equal the cdelt
derivs = local_partial_pixel_derivatives(spatial_wcs_2d_small_angle, 3, 3)
np.testing.assert_allclose(np.diag(derivs), spatial_wcs_2d_small_angle.wcs.cdelt)
np.testing.assert_allclose(derivs[not_diag].flat, [0, 0], atol=1e-10)
# Far away from the reference pixel this should not equal the cdelt
derivs = local_partial_pixel_derivatives(spatial_wcs_2d_small_angle, 3e4, 3e4)
assert not np.allclose(np.diag(derivs), spatial_wcs_2d_small_angle.wcs.cdelt)
# At (or close to) the reference pixel this should equal the cdelt
derivs = local_partial_pixel_derivatives(
spatial_wcs_2d_small_angle, 3, 3, normalize_by_world=True)
np.testing.assert_allclose(np.diag(derivs), [1, 1])
np.testing.assert_allclose(derivs[not_diag].flat, [0, 0], atol=1e-8)
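# A minimal sketch of what local_partial_pixel_derivatives estimates above: a
# forward-difference Jacobian d(world_i)/d(pixel_j) of the pixel-to-world
# transformation. Near the reference pixel of this small-angle TAN projection
# the diagonal approaches CDELT.
def _example_finite_difference_jacobian():
    w = WCS(naxis=2)
    w.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']
    w.wcs.crpix = [3.0] * 2
    w.wcs.cdelt = [0.002] * 2
    w.wcs.set()
    def world(x, y):
        return np.array(w.pixel_to_world_values(x, y))
    x0, y0, h = 2.0, 2.0, 1e-4  # 0-based pixel at the reference point
    jac = np.stack([(world(x0 + h, y0) - world(x0, y0)) / h,
                    (world(x0, y0 + h) - world(x0, y0)) / h], axis=-1)
    np.testing.assert_allclose(np.diag(jac), w.wcs.cdelt, rtol=1e-3)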
def test_pixel_to_world_correlation_matrix_celestial():
wcs = WCS(naxis=2)
wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN'
wcs.wcs.set()
assert_equal(wcs.axis_correlation_matrix, [[1, 1], [1, 1]])
matrix, classes = _pixel_to_world_correlation_matrix(wcs)
assert_equal(matrix, [[1, 1]])
assert classes == [SkyCoord]
def test_pixel_to_world_correlation_matrix_spectral_cube_uncorrelated():
wcs = WCS(naxis=3)
wcs.wcs.ctype = 'RA---TAN', 'FREQ', 'DEC--TAN'
wcs.wcs.set()
assert_equal(wcs.axis_correlation_matrix, [[1, 0, 1], [0, 1, 0], [1, 0, 1]])
matrix, classes = _pixel_to_world_correlation_matrix(wcs)
assert_equal(matrix, [[1, 0, 1], [0, 1, 0]])
assert classes == [SkyCoord, Quantity]
def test_pixel_to_world_correlation_matrix_spectral_cube_correlated():
wcs = WCS(naxis=3)
wcs.wcs.ctype = 'RA---TAN', 'FREQ', 'DEC--TAN'
wcs.wcs.cd = np.ones((3, 3))
wcs.wcs.set()
assert_equal(wcs.axis_correlation_matrix, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
matrix, classes = _pixel_to_world_correlation_matrix(wcs)
assert_equal(matrix, [[1, 1, 1], [1, 1, 1]])
assert classes == [SkyCoord, Quantity]
def test_pixel_to_pixel_correlation_matrix_celestial():
wcs_in = WCS(naxis=2)
wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN'
wcs_in.wcs.set()
wcs_out = WCS(naxis=2)
wcs_out.wcs.ctype = 'DEC--TAN', 'RA---TAN'
wcs_out.wcs.set()
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
assert_equal(matrix, [[1, 1], [1, 1]])
def test_pixel_to_pixel_correlation_matrix_spectral_cube_uncorrelated():
wcs_in = WCS(naxis=3)
wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'
wcs_in.wcs.set()
wcs_out = WCS(naxis=3)
wcs_out.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'
wcs_out.wcs.set()
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
assert_equal(matrix, [[1, 1, 0], [0, 0, 1], [1, 1, 0]])
def test_pixel_to_pixel_correlation_matrix_spectral_cube_correlated():
# NOTE: only make one of the WCSes have correlated axes to really test this
wcs_in = WCS(naxis=3)
wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'
wcs_in.wcs.set()
wcs_out = WCS(naxis=3)
wcs_out.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'
wcs_out.wcs.cd = np.ones((3, 3))
wcs_out.wcs.set()
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
assert_equal(matrix, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_pixel_to_pixel_correlation_matrix_mismatch():
wcs_in = WCS(naxis=2)
wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN'
wcs_in.wcs.set()
wcs_out = WCS(naxis=3)
wcs_out.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'
wcs_out.wcs.set()
with pytest.raises(ValueError) as exc:
_pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
assert exc.value.args[0] == "The two WCS return a different number of world coordinates"
wcs3 = WCS(naxis=2)
wcs3.wcs.ctype = 'FREQ', 'PIXEL'
wcs3.wcs.set()
with pytest.raises(ValueError) as exc:
_pixel_to_pixel_correlation_matrix(wcs_out, wcs3)
assert exc.value.args[0] == "The world coordinate types of the two WCS do not match"
wcs4 = WCS(naxis=4)
wcs4.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'Q1', 'Q2'
wcs4.wcs.cunit = ['deg', 'deg', 'm/s', 'm/s']
wcs4.wcs.set()
wcs5 = WCS(naxis=4)
wcs5.wcs.ctype = 'Q1', 'RA---TAN', 'DEC--TAN', 'Q2'
wcs5.wcs.cunit = ['m/s', 'deg', 'deg', 'm/s']
wcs5.wcs.set()
with pytest.raises(ValueError, match="World coordinate order doesn't match "
"and automatic matching is ambiguous"):
_pixel_to_pixel_correlation_matrix(wcs4, wcs5)
def test_pixel_to_pixel_correlation_matrix_nonsquare():
# Here we set up an input WCS that maps 3 pixel coordinates to 4 world
# coordinates - the idea is to make sure that things work fine in cases
# where the number of input and output pixel coordinates do not match.
class FakeWCS:
pass
wcs_in = FakeWCS()
wcs_in.low_level_wcs = wcs_in
wcs_in.pixel_n_dim = 3
wcs_in.world_n_dim = 4
wcs_in.axis_correlation_matrix = [[True, True, False],
[True, True, False],
[True, True, False],
[False, False, True]]
wcs_in.world_axis_object_components = [('spat', 'ra', 'ra.degree'),
('spat', 'dec', 'dec.degree'),
('spec', 0, 'value'),
('time', 0, 'utc.value')]
wcs_in.world_axis_object_classes = {'spat': ('astropy.coordinates.SkyCoord', (),
{'frame': 'icrs'}),
'spec': ('astropy.units.Wavelength', (None,), {}),
'time': ('astropy.time.Time', (None,),
{'format': 'mjd', 'scale': 'utc'})}
wcs_out = FakeWCS()
wcs_out.low_level_wcs = wcs_out
wcs_out.pixel_n_dim = 4
wcs_out.world_n_dim = 4
wcs_out.axis_correlation_matrix = [[True, False, False, False],
[False, True, True, False],
[False, True, True, False],
[False, False, False, True]]
wcs_out.world_axis_object_components = [('spec', 0, 'value'),
('spat', 'ra', 'ra.degree'),
('spat', 'dec', 'dec.degree'),
('time', 0, 'utc.value')]
wcs_out.world_axis_object_classes = wcs_in.world_axis_object_classes
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
matrix = matrix.astype(int)
# The shape should be (n_pixel_out, n_pixel_in)
assert matrix.shape == (4, 3)
expected = np.array([[1, 1, 0], [1, 1, 0], [1, 1, 0], [0, 0, 1]])
assert_equal(matrix, expected)
def test_split_matrix():
assert _split_matrix(np.array([[1]])) == [([0], [0])]
assert _split_matrix(np.array([[1, 1],
[1, 1]])) == [([0, 1], [0, 1])]
assert _split_matrix(np.array([[1, 1, 0],
[1, 1, 0],
[0, 0, 1]])) == [([0, 1], [0, 1]), ([2], [2])]
assert _split_matrix(np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])) == [([0], [1]), ([1], [0]), ([2], [2])]
assert _split_matrix(np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])) == [([0, 1, 2], [0, 1, 2])]
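# A minimal sketch (a naive alternative, not astropy's implementation) of the
# partitioning _split_matrix performs above: treat the matrix as a bipartite
# graph between world axes (rows) and pixel axes (columns) and grow each
# block to a connected component. On [[1, 1, 0], [1, 1, 0], [0, 0, 1]] this
# yields [([0, 1], [0, 1]), ([2], [2])], matching the assertion above.
def _example_connected_component_split(matrix):
    matrix = np.asarray(matrix, dtype=bool)
    assigned = set()
    blocks = []
    for seed in range(matrix.shape[0]):
        if seed in assigned:
            continue
        rows, cols = {seed}, set()
        while True:
            new_cols = {j for i in rows for j in np.nonzero(matrix[i])[0]}
            new_rows = {i for j in new_cols for i in np.nonzero(matrix[:, j])[0]}
            if new_cols <= cols and new_rows <= rows:
                break
            cols |= new_cols
            rows |= new_rows
        assigned |= rows
        blocks.append((sorted(int(i) for i in rows), sorted(int(j) for j in cols)))
    return blocks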
def test_pixel_to_pixel():
wcs_in = WCS(naxis=3)
wcs_in.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'
wcs_in.wcs.set()
wcs_out = WCS(naxis=3)
wcs_out.wcs.ctype = 'GLON-CAR', 'GLAT-CAR', 'FREQ'
wcs_out.wcs.set()
# First try with scalars
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
x, y, z = pixel_to_pixel(wcs_in, wcs_out, 1, 2, 3)
assert x.shape == ()
assert y.shape == ()
assert z.shape == ()
# Now try with broadcasted arrays
x = np.linspace(10, 20, 10)
y = np.linspace(10, 20, 20)
z = np.linspace(10, 20, 30)
Z1, Y1, X1 = np.meshgrid(z, y, x, indexing='ij', copy=False)
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
X2, Y2, Z2 = pixel_to_pixel(wcs_in, wcs_out, X1, Y1, Z1)
# The final arrays should have the correct shape
assert X2.shape == (30, 20, 10)
assert Y2.shape == (30, 20, 10)
assert Z2.shape == (30, 20, 10)
# But behind the scenes should also be broadcasted
assert unbroadcast(X2).shape == (30, 1, 10)
assert unbroadcast(Y2).shape == (30, 1, 10)
assert unbroadcast(Z2).shape == (20, 1)
# We can put the values back through the function to ensure round-tripping
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
X3, Y3, Z3 = pixel_to_pixel(wcs_out, wcs_in, X2, Y2, Z2)
# The final arrays should have the correct shape
assert X2.shape == (30, 20, 10)
assert Y2.shape == (30, 20, 10)
assert Z2.shape == (30, 20, 10)
# But behind the scenes should also be broadcasted
assert unbroadcast(X3).shape == (30, 1, 10)
assert unbroadcast(Y3).shape == (20, 1)
assert unbroadcast(Z3).shape == (30, 1, 10)
# And these arrays should match the input
assert_allclose(X1, X3)
assert_allclose(Y1, Y3)
assert_allclose(Z1, Z3)
def test_pixel_to_pixel_correlated():
wcs_in = WCS(naxis=2)
wcs_in.wcs.ctype = 'DEC--TAN', 'RA---TAN'
wcs_in.wcs.set()
wcs_out = WCS(naxis=2)
wcs_out.wcs.ctype = 'GLON-CAR', 'GLAT-CAR'
wcs_out.wcs.set()
# First try with scalars
x, y = pixel_to_pixel(wcs_in, wcs_out, 1, 2)
assert x.shape == ()
assert y.shape == ()
# Now try with broadcasted arrays
x = np.linspace(10, 20, 10)
y = np.linspace(10, 20, 20)
Y1, X1 = np.meshgrid(y, x, indexing='ij', copy=False)
Y2, X2 = pixel_to_pixel(wcs_in, wcs_out, X1, Y1)
# The final arrays should have the correct shape
assert X2.shape == (20, 10)
assert Y2.shape == (20, 10)
# and there are no efficiency gains here since the celestial axes are correlated
assert unbroadcast(X2).shape == (20, 10)
def test_pixel_to_pixel_1d():
# Simple test to make sure that when WCS only returns one world coordinate
# this still works correctly (since this requires special treatment behind
# the scenes).
wcs_in = WCS(naxis=1)
wcs_in.wcs.ctype = 'COORD1',
wcs_in.wcs.cunit = 'nm',
wcs_in.wcs.set()
wcs_out = WCS(naxis=1)
wcs_out.wcs.ctype = 'COORD2',
wcs_out.wcs.cunit = 'cm',
wcs_out.wcs.set()
# First try with a scalar
x = pixel_to_pixel(wcs_in, wcs_out, 1)
assert x.shape == ()
# Next with a regular array
x = np.linspace(10, 20, 10)
x = pixel_to_pixel(wcs_in, wcs_out, x)
assert x.shape == (10,)
# And now try with a broadcasted array
x = np.broadcast_to(np.linspace(10, 20, 10), (4, 10))
x = pixel_to_pixel(wcs_in, wcs_out, x)
assert x.shape == (4, 10)
# The broadcasting of the input should be retained
assert unbroadcast(x).shape == (10,)
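# A minimal sketch of the memory trick verified above: unbroadcast returns
# the smallest array that np.broadcast_to can expand back to the original,
# so rows duplicated by broadcasting are stored (and transformed) only once.
def _example_unbroadcast_roundtrip():
    base = np.linspace(10, 20, 10)
    expanded = np.broadcast_to(base, (4, 10))  # four views of one row
    compact = unbroadcast(expanded)
    assert compact.shape == (10,)
    assert_allclose(np.broadcast_to(compact, expanded.shape), expanded)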
header_str_linear = """
XTENSION= 'IMAGE ' / Image extension
BITPIX = -32 / array data type
NAXIS = 2 / number of array dimensions
NAXIS1 = 50
NAXIS2 = 50
PCOUNT = 0 / number of parameters
GCOUNT = 1 / number of groups
RADESYS = 'ICRS '
EQUINOX = 2000.0
WCSAXES = 2
CTYPE1 = 'RA---TAN'
CTYPE2 = 'DEC--TAN'
CRVAL1 = 250.3497414839765
CRVAL2 = 2.280925599609063
CRPIX1 = 1045.0
CRPIX2 = 1001.0
CD1_1 = -0.005564478186178
CD1_2 = -0.001042099258152
CD2_1 = 0.00118144146585
CD2_2 = -0.005590816683583
"""
header_str_sip = """
XTENSION= 'IMAGE ' / Image extension
BITPIX = -32 / array data type
NAXIS = 2 / number of array dimensions
NAXIS1 = 50
NAXIS2 = 50
PCOUNT = 0 / number of parameters
GCOUNT = 1 / number of groups
RADESYS = 'ICRS '
EQUINOX = 2000.0
WCSAXES = 2
CTYPE1 = 'RA---TAN-SIP'
CTYPE2 = 'DEC--TAN-SIP'
CRVAL1 = 250.3497414839765
CRVAL2 = 2.280925599609063
CRPIX1 = 1045.0
CRPIX2 = 1001.0
CD1_1 = -0.005564478186178
CD1_2 = -0.001042099258152
CD2_1 = 0.00118144146585
CD2_2 = -0.005590816683583
A_ORDER = 2
B_ORDER = 2
A_2_0 = 2.02451189234E-05
A_0_2 = 3.317603337918E-06
A_1_1 = 1.73456334971071E-05
B_2_0 = 3.331330003472E-06
B_0_2 = 2.04247482482589E-05
B_1_1 = 1.71476710804143E-05
AP_ORDER= 2
BP_ORDER= 2
AP_1_0 = 0.000904700296389636
AP_0_1 = 0.000627660715584716
AP_2_0 = -2.023482905861E-05
AP_0_2 = -3.332285841011E-06
AP_1_1 = -1.731636633824E-05
BP_1_0 = 0.000627960882053211
BP_0_1 = 0.000911222886084808
BP_2_0 = -3.343918167224E-06
BP_0_2 = -2.041598249021E-05
BP_1_1 = -1.711876336719E-05
A_DMAX = 44.72893589844534
B_DMAX = 44.62692873032506
"""
header_str_prob = """
NAXIS = 2 / number of array dimensions
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1024.5 / Pixel coordinate of reference point
CRPIX2 = 1024.5 / Pixel coordinate of reference point
CD1_1 = -1.7445934400771E-05 / Coordinate transformation matrix element
CD1_2 = -4.9826985362578E-08 / Coordinate transformation matrix element
CD2_1 = -5.0068838822312E-08 / Coordinate transformation matrix element
CD2_2 = 1.7530614610951E-05 / Coordinate transformation matrix element
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 5.8689341666667 / [deg] Coordinate value at reference point
CRVAL2 = -71.995508583333 / [deg] Coordinate value at reference point
"""
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(
'header_str,crval,sip_degree,user_proj_point,exp_max_dist,exp_std_dist',
[
# simple testset no distortions
(header_str_linear, 250.3497414839765, None, False, 7e-5*u.deg, 2.5e-5*u.deg),
# simple testset with distortions
(header_str_sip, 250.3497414839765, 2, False, 7e-6*u.deg, 2.5e-6*u.deg),
# testset with problematic WCS header that failed before
(header_str_prob, 5.8689341666667, None, False, 7e-6*u.deg, 2.5e-6*u.deg),
# simple testset no distortions, user defined center
(header_str_linear, 250.3497414839765, None, True, 7e-5*u.deg, 2.5e-5*u.deg),
# 360->0 degree crossover, simple testset no distortions
(header_str_linear, 352.3497414839765, None, False, 7e-5*u.deg, 2.5e-5*u.deg),
# 360->0 degree crossover, simple testset with distortions
(header_str_sip, 352.3497414839765, 2, False, 7e-6*u.deg, 2.5e-6*u.deg),
# 360->0 degree crossover, testset with problematic WCS header that failed before
(header_str_prob, 352.3497414839765, None, False, 7e-6*u.deg, 2.5e-6*u.deg),
# 360->0 degree crossover, simple testset no distortions, user defined center
(header_str_linear, 352.3497414839765, None, True, 7e-5*u.deg, 2.5e-5*u.deg),
])
def test_fit_wcs_from_points(header_str, crval, sip_degree, user_proj_point,
exp_max_dist, exp_std_dist):
header = fits.Header.fromstring(header_str, sep='\n')
header["CRVAL1"] = crval
true_wcs = WCS(header, relax=True)
# Getting the pixel coordinates
x, y = np.meshgrid(list(range(10)), list(range(10)))
x = x.flatten()
y = y.flatten()
# Calculating the true sky positions
world_pix = true_wcs.pixel_to_world(x, y)
# which projection point to use
if user_proj_point:
proj_point = world_pix[0]
projlon = proj_point.data.lon.deg
projlat = proj_point.data.lat.deg
else:
proj_point = 'center'
# Fitting the wcs
fit_wcs = fit_wcs_from_points((x, y), world_pix,
proj_point=proj_point,
sip_degree=sip_degree)
# Validate that the true sky coordinates
# match sky coordinates calculated from the wcs fit
world_pix_new = fit_wcs.pixel_to_world(x, y)
dists = world_pix.separation(world_pix_new)
assert dists.max() < exp_max_dist
assert np.std(dists) < exp_std_dist
if user_proj_point:
assert (fit_wcs.wcs.crval == [projlon, projlat]).all()
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_wcs_from_points_CRPIX_bounds():
# Test CRPIX bounds requirement
wcs_str = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = 0.00056205870415378 / Coordinate transformation matrix element
PC1_2 = -0.00569181083243 / Coordinate transformation matrix element
PC2_1 = 0.0056776810932466 / Coordinate transformation matrix element
PC2_2 = 0.0004208048403273 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 104.57797893504 / [deg] Coordinate value at reference point
CRVAL2 = -74.195502593322 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = -74.195502593322 / [deg] Native latitude of celestial pole
TIMESYS = 'TDB' / Time scale
TIMEUNIT= 'd' / Time units
DATEREF = '1858-11-17' / ISO-8601 fiducial time
MJDREFI = 0.0 / [d] MJD of fiducial time, integer part
MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part
DATE-OBS= '2019-03-27T03:30:13.832Z' / ISO-8601 time of observation
MJD-OBS = 58569.145993426 / [d] MJD of observation
MJD-OBS = 58569.145993426 / [d] MJD at start of observation
TSTART = 1569.6467941661 / [d] Time elapsed since fiducial time at start
DATE-END= '2019-03-27T04:00:13.831Z' / ISO-8601 time at end of observation
MJD-END = 58569.166826748 / [d] MJD at end of observation
TSTOP = 1569.6676274905 / [d] Time elapsed since fiducial time at end
TELAPSE = 0.02083332443 / [d] Elapsed time (start to stop)
TIMEDEL = 0.020833333333333 / [d] Time resolution
TIMEPIXR= 0.5 / Reference position of timestamp in binned data
RADESYS = 'ICRS' / Equatorial coordinate system
"""
wcs_header = fits.Header.fromstring(wcs_str, sep='\n')
ffi_wcs = WCS(wcs_header)
yi, xi = (1000, 1000)
y, x = (10, 200)
center_coord = SkyCoord(ffi_wcs.all_pix2world([[xi+x//2, yi+y//2]], 0), unit='deg')[0]
ypix, xpix = (arr.flatten() for arr in np.mgrid[xi : xi + x, yi : yi + y])
world_pix = SkyCoord(*ffi_wcs.all_pix2world(xpix, ypix, 0), unit='deg')
fit_wcs = fit_wcs_from_points((ypix, xpix), world_pix, proj_point='center')
assert (fit_wcs.wcs.crpix.astype(int) == [1100, 1005]).all()
assert fit_wcs.pixel_shape == (1199, 1009)
@pytest.mark.skipif('not HAS_SCIPY')
def test_issue10991():
# test issue #10991 (it just needs to run and set the user defined crval)
xy = np.array([[1766.88276168, 662.96432257, 171.50212526, 120.70924648],
[1706.69832901, 1788.85480559, 1216.98949653, 1307.41843381]])
world_coords = SkyCoord([(66.3542367, 22.20000162), (67.15416174, 19.18042906),
(65.73375432, 17.54251555), (66.02400512, 17.44413253)],
frame="icrs", unit="deg")
proj_point = SkyCoord(64.67514918, 19.63389538,
frame="icrs", unit="deg")
fit_wcs = fit_wcs_from_points(
xy=xy,
world_coords=world_coords,
proj_point=proj_point,
projection='TAN'
)
projlon = proj_point.data.lon.deg
projlat = proj_point.data.lat.deg
assert (fit_wcs.wcs.crval == [projlon, projlat]).all()
@pytest.mark.remote_data
@pytest.mark.parametrize('x_in,y_in', [[0, 0], [np.arange(5), np.arange(5)]])
def test_pixel_to_world_itrs(x_in, y_in):
"""Regression test for https://github.com/astropy/astropy/pull/9609"""
if Version(_wcs.__version__) >= Version('7.4'):
ctx = pytest.warns(
FITSFixedWarning,
match=r"'datfix' made the change 'Set MJD-OBS to 57982\.528524 from DATE-OBS'\.")
else:
ctx = nullcontext()
with ctx:
wcs = WCS({'NAXIS': 2,
'CTYPE1': 'TLON-CAR',
'CTYPE2': 'TLAT-CAR',
'RADESYS': 'ITRS ',
'DATE-OBS': '2017-08-17T12:41:04.444'})
# This shouldn't raise an exception.
coord = wcs.pixel_to_world(x_in, y_in)
# Check round trip transformation.
x, y = wcs.world_to_pixel(coord)
np.testing.assert_almost_equal(x, x_in)
np.testing.assert_almost_equal(y, y_in)
@pytest.fixture
def dkist_location():
return EarthLocation(*(-5466045.25695494, -2404388.73741278, 2242133.88769004) * u.m)
def test_obsgeo_cartesian(dkist_location):
obstime = Time("2021-05-21T03:00:00")
wcs = WCS(naxis=2)
wcs.wcs.obsgeo = list(dkist_location.to_value(u.m).tolist()) + [0, 0, 0]
wcs.wcs.dateobs = obstime.isot
frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)
assert isinstance(frame, ITRS)
assert frame.x == dkist_location.x
assert frame.y == dkist_location.y
assert frame.z == dkist_location.z
def test_obsgeo_spherical(dkist_location):
obstime = Time("2021-05-21T03:00:00")
dkist_location = dkist_location.get_itrs(obstime)
loc_sph = dkist_location.spherical
wcs = WCS(naxis=2)
wcs.wcs.obsgeo = [0, 0, 0] + [loc_sph.lon.value, loc_sph.lat.value, loc_sph.distance.value]
wcs.wcs.dateobs = obstime.isot
frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)
assert isinstance(frame, ITRS)
assert u.allclose(frame.x, dkist_location.x)
assert u.allclose(frame.y, dkist_location.y)
assert u.allclose(frame.z, dkist_location.z)
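# A minimal sketch (hypothetical helper) of the OBSGEO layout assumed by the
# tests above: elements 0-2 are Cartesian ITRS coordinates in metres and
# elements 3-5 are spherical (lon [deg], lat [deg], radius [m]);
# obsgeo_to_frame falls back to the spherical triple when the Cartesian one
# is not finite.
def _example_full_obsgeo(location, obstime):
    itrs = location.get_itrs(obstime)
    sph = itrs.spherical
    cartesian = [itrs.x.to_value(u.m), itrs.y.to_value(u.m), itrs.z.to_value(u.m)]
    spherical = [sph.lon.deg, sph.lat.deg, sph.distance.to_value(u.m)]
    return cartesian + spherical  # a complete 6-element OBSGEO vector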
def test_obsgeo_infinite(dkist_location):
obstime = Time("2021-05-21T03:00:00")
dkist_location = dkist_location.get_itrs(obstime)
loc_sph = dkist_location.spherical
wcs = WCS(naxis=2)
wcs.wcs.obsgeo = [1, 1, np.nan] + [loc_sph.lon.value, loc_sph.lat.value, loc_sph.distance.value]
wcs.wcs.dateobs = obstime.isot
wcs.wcs.set()
frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)
assert isinstance(frame, ITRS)
assert u.allclose(frame.x, dkist_location.x)
assert u.allclose(frame.y, dkist_location.y)
assert u.allclose(frame.z, dkist_location.z)
@pytest.mark.parametrize("obsgeo", ([np.nan] * 6, None, [0] * 6, [54] * 5))
def test_obsgeo_invalid(obsgeo):
with pytest.raises(ValueError):
obsgeo_to_frame(obsgeo, None)
|
3f5b9be6f8a45ba4fdad55b3f41928d7ca432913d9f69e1dc8fbb005224f95d1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
from contextlib import nullcontext
from datetime import datetime
from packaging.version import Version
import pytest
import numpy as np
from numpy.testing import (
assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_equal)
from astropy import wcs
from astropy.wcs import _wcs # noqa
from astropy import units as u
from astropy.utils.data import (
get_pkg_data_filenames, get_pkg_data_contents, get_pkg_data_filename)
from astropy.utils.misc import NumpyRNGContext
from astropy.utils.exceptions import (
AstropyUserWarning, AstropyWarning, AstropyDeprecationWarning)
from astropy.tests.helper import assert_quantity_allclose
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.nddata import Cutout2D
_WCSLIB_VER = Version(_wcs.__version__)
# NOTE: User can choose to use system wcslib instead of bundled.
def ctx_for_v71_dateref_warnings():
if _WCSLIB_VER >= Version('7.1') and _WCSLIB_VER < Version('7.3'):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set DATE-REF to '1858-11-17' from MJD-REF'\.")
else:
ctx = nullcontext()
return ctx
class TestMaps:
def setup(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames(
"data/maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
assert len(self._file_list) == n_data_files, (
"test_spectra has wrong number data files: found {}, expected "
" {}".format(len(self._file_list), n_data_files))
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "maps", filename), encoding='binary')
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
class TestSpectra:
def setup(self):
self._file_list = list(get_pkg_data_filenames("data/spectra",
pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 6
assert len(self._file_list) == n_data_files, (
"test_spectra has wrong number data files: found {}, expected "
" {}".format(len(self._file_list), n_data_files))
def test_spectra(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "spectra", filename), encoding='binary')
# finally run the test.
if _WCSLIB_VER >= Version('7.4'):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set MJD-OBS to 53925\.853472 from DATE-OBS'\.") # noqa
else:
ctx = nullcontext()
with ctx:
all_wcs = wcs.find_all_wcs(header)
assert len(all_wcs) == 9
def test_fixes():
"""
From github issue #36
"""
header = get_pkg_data_contents('data/nonstandard_units.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning) as w:
wcs.WCS(header, translate_units='dhs')
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
assert len(w) == 3
assert "'datfix' made the change 'Success'." in str(w.pop().message)
else:
assert len(w) == 2
first_wmsg = str(w[0].message)
assert 'unitfix' in first_wmsg and 'Hz' in first_wmsg and 'M/S' in first_wmsg
assert 'plane angle' in str(w[1].message) and 'm/s' in str(w[1].message)
# Ignore "PV2_2 = 0.209028857410973 invalid keyvalue" warning seen on Windows.
@pytest.mark.filterwarnings(r'ignore:PV2_2')
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents(
'data/outside_sky.hdr', encoding='binary')
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100., 500.]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200., 200.]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000., 1000.]], 0)))
def test_pix2world():
"""
From github issue #1463
"""
    # TODO: write this to test the expected output behavior of pix2world;
    # currently this just makes sure it doesn't error out in unexpected ways
    # and compares `wcs.pc` and `result` against the reference values below.
filename = get_pkg_data_filename('data/sip2.fits')
with pytest.warns(wcs.FITSFixedWarning) as caught_warnings:
        # this raises a warning that is unimportant for testing pix2world:
        # FITSFixedWarning(u'The WCS transformation has more axes (2) than
        # the image it is associated with (0)')
ww = wcs.WCS(filename)
# might as well monitor for changing behavior
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
assert len(caught_warnings) == 2
else:
assert len(caught_warnings) == 1
n = 3
pixels = (np.arange(n) * np.ones((2, n))).T
result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
# Catch #2791
ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)
# assuming that the data of sip2.fits doesn't change
answer = np.array([[0.00024976, 0.00023018],
[0.00023043, -0.00024997]])
assert np.allclose(ww.wcs.pc, answer, atol=1.e-8)
answer = np.array([[202.39265216, 47.17756518],
[202.39335826, 47.17754619],
[202.39406436, 47.1775272]])
assert np.allclose(result, answer, atol=1.e-8, rtol=1.e-10)
def test_load_fits_path():
fits_name = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(fits_name)
def test_dict_init():
"""
Test that WCS can be initialized with a dict-like object
"""
# Dictionary with no actual WCS, returns identity transform
with ctx_for_v71_dateref_warnings():
w = wcs.WCS({})
xp, yp = w.wcs_world2pix(41., 2., 1)
assert_array_almost_equal_nulp(xp, 41., 10)
assert_array_almost_equal_nulp(yp, 2., 10)
# Valid WCS
hdr = {
'CTYPE1': 'GLON-CAR',
'CTYPE2': 'GLAT-CAR',
'CUNIT1': 'deg',
'CUNIT2': 'deg',
'CRPIX1': 1,
'CRPIX2': 1,
'CRVAL1': 40.,
'CRVAL2': 0.,
'CDELT1': -0.1,
'CDELT2': 0.1
}
if _WCSLIB_VER >= Version('7.1'):
hdr['DATEREF'] = '1858-11-17'
if _WCSLIB_VER >= Version('7.4'):
ctx = pytest.warns(
wcs.wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set MJDREF to 0\.000000 from DATEREF'\.")
else:
ctx = nullcontext()
with ctx:
w = wcs.WCS(hdr)
xp, yp = w.wcs_world2pix(41., 2., 0)
assert_array_almost_equal_nulp(xp, -10., 10)
assert_array_almost_equal_nulp(yp, 20., 10)
def test_extra_kwarg():
"""
Issue #444
"""
w = wcs.WCS()
with NumpyRNGContext(123456789):
data = np.random.rand(100, 2)
with pytest.raises(TypeError):
w.wcs_pix2world(data, origin=1)
def test_3d_shapes():
"""
Issue #444
"""
w = wcs.WCS(naxis=3)
with NumpyRNGContext(123456789):
data = np.random.rand(100, 3)
result = w.wcs_pix2world(data, 1)
assert result.shape == (100, 3)
result = w.wcs_pix2world(
data[..., 0], data[..., 1], data[..., 2], 1)
assert len(result) == 3
def test_preserve_shape():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((2, 3, 4))
xw, yw = w.wcs_pix2world(x, y, 1)
assert xw.shape == (2, 3, 4)
assert yw.shape == (2, 3, 4)
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_broadcasting():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = 1
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_shape_mismatch():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((3, 2, 4))
with pytest.raises(ValueError) as exc:
xw, yw = w.wcs_pix2world(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
with pytest.raises(ValueError) as exc:
xp, yp = w.wcs_world2pix(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
# There are some ambiguities that need to be worked around when
# naxis == 1
w = wcs.WCS(naxis=1)
x = np.random.random((42, 1))
xw = w.wcs_pix2world(x, 1)
assert xw.shape == (42, 1)
x = np.random.random((42,))
xw, = w.wcs_pix2world(x, 1)
assert xw.shape == (42,)
def test_invalid_shape():
# Issue #1395
w = wcs.WCS(naxis=2)
xy = np.random.random((2, 3))
with pytest.raises(ValueError) as exc:
w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
xy = np.random.random((2, 1))
with pytest.raises(ValueError) as exc:
w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
def test_warning_about_defunct_keywords():
header = get_pkg_data_contents('data/defunct_keywords.hdr', encoding='binary')
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
n_warn = 5
else:
n_warn = 4
# Make sure the warnings come out every time...
for _ in range(2):
with pytest.warns(wcs.FITSFixedWarning) as w:
wcs.WCS(header)
assert len(w) == n_warn
# 7.4 adds a fifth warning "'datfix' made the change 'Success'."
for item in w[:4]:
assert 'PCi_ja' in str(item.message)
def test_warning_about_defunct_keywords_exception():
header = get_pkg_data_contents('data/defunct_keywords.hdr', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(header)
def test_to_header_string():
hdrstr = (
"WCSAXES = 2 / Number of coordinate axes ",
"CRPIX1 = 0.0 / Pixel coordinate of reference point ",
"CRPIX2 = 0.0 / Pixel coordinate of reference point ",
"CDELT1 = 1.0 / Coordinate increment at reference point ",
"CDELT2 = 1.0 / Coordinate increment at reference point ",
"CRVAL1 = 0.0 / Coordinate value at reference point ",
"CRVAL2 = 0.0 / Coordinate value at reference point ",
"LATPOLE = 90.0 / [deg] Native latitude of celestial pole ",
)
if _WCSLIB_VER >= Version('7.3'):
hdrstr += (
"MJDREF = 0.0 / [d] MJD of fiducial time ",
)
elif _WCSLIB_VER >= Version('7.1'):
hdrstr += (
"DATEREF = '1858-11-17' / ISO-8601 fiducial time ",
"MJDREFI = 0.0 / [d] MJD of fiducial time, integer part ",
"MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part "
)
hdrstr += ("END", )
header_string = ''.join(hdrstr)
w = wcs.WCS()
h0 = fits.Header.fromstring(w.to_header_string().strip())
if 'COMMENT' in h0:
del h0['COMMENT']
if '' in h0:
del h0['']
h1 = fits.Header.fromstring(header_string.strip())
assert dict(h0) == dict(h1)
def test_to_fits():
if _WCSLIB_VER < Version('7.1'):
nrec = 8
elif _WCSLIB_VER < Version('7.3'):
nrec = 11
else:
nrec = 9
w = wcs.WCS()
header_string = w.to_header()
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert header_string == wfits[0].header[-nrec:]
def test_to_header_warning():
fits_name = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
x = wcs.WCS(fits_name)
with pytest.warns(AstropyWarning, match='A_ORDER') as w:
x.to_header()
assert len(w) == 1
def test_no_comments_in_header():
w = wcs.WCS()
header = w.to_header()
assert w.wcs.alt not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
assert 'COMMENT' not in header
wkey = 'P'
header = w.to_header(key=wkey)
assert wkey not in header
assert 'COMMENT' not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
def test_find_all_wcs_crash():
"""
Causes a double free without a recent fix in wcslib_wrap.C
"""
with open(get_pkg_data_filename("data/too_many_pv.hdr")) as fd:
header = fd.read()
# We have to set fix=False here, because one of the fixing tasks is to
# remove redundant SCAMP distortion parameters when SIP distortion
# parameters are also present.
with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning):
wcs.find_all_wcs(header, fix=False)
# NOTE: Warning bubbles up from C layer during wcs.validate() and
# is hard to catch, so we just ignore it.
@pytest.mark.filterwarnings("ignore")
def test_validate():
results = wcs.validate(get_pkg_data_filename("data/validate.fits"))
results_txt = sorted({x.strip() for x in repr(results).splitlines()})
if _WCSLIB_VER >= Version('7.6'):
filename = 'data/validate.7.6.txt'
elif _WCSLIB_VER >= Version('7.4'):
filename = 'data/validate.7.4.txt'
elif _WCSLIB_VER >= Version('6.0'):
filename = 'data/validate.6.txt'
elif _WCSLIB_VER >= Version('5.13'):
filename = 'data/validate.5.13.txt'
elif _WCSLIB_VER >= Version('5.0'):
filename = 'data/validate.5.0.txt'
else:
filename = 'data/validate.txt'
with open(get_pkg_data_filename(filename)) as fd:
lines = fd.readlines()
assert sorted({x.strip() for x in lines}) == results_txt
def test_validate_with_2_wcses():
# From Issue #2053
with pytest.warns(AstropyUserWarning):
results = wcs.validate(get_pkg_data_filename("data/2wcses.hdr"))
assert "WCS key 'A':" in str(results)
def test_crpix_maps_to_crval():
twcs = wcs.WCS(naxis=2)
twcs.wcs.crval = [251.29, 57.58]
twcs.wcs.cdelt = [1, 1]
twcs.wcs.crpix = [507, 507]
twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])
twcs._naxis = [1014, 1014]
twcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)
twcs.wcs.set()
pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.wcs_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.all_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
def test_all_world2pix(fname=None, ext=0,
tolerance=1.0e-4, origin=0,
random_npts=25000,
adaptive=False, maxiter=20,
detect_divergence=True):
"""Test all_world2pix, iterative inverse of all_pix2world"""
# Open test FITS file:
if fname is None:
fname = get_pkg_data_filename('data/j94f05bgq_flt.fits')
ext = ('SCI', 1)
if not os.path.isfile(fname):
raise OSError(f"Input file '{fname:s}' to 'test_all_world2pix' not found.")
h = fits.open(fname)
w = wcs.WCS(h[ext].header, h)
h.close()
del h
crpix = w.wcs.crpix
ncoord = crpix.shape[0]
# Assume that CRPIX is at the center of the image and that the image has
# a power-of-2 number of pixels along each axis. Only use the central
# 1/64 for this testing purpose:
naxesi_l = list((7. / 16 * crpix).astype(int))
naxesi_u = list((9. / 16 * crpix).astype(int))
# Generate integer indices of pixels (image grid):
img_pix = np.dstack([i.flatten() for i in
np.meshgrid(*map(range, naxesi_l, naxesi_u))])[0]
    # Generate random data (in image coordinates):
with NumpyRNGContext(123456789):
rnd_pix = np.random.rand(random_npts, ncoord)
# Scale random data to cover the central part of the image
mwidth = 2 * (crpix * 1. / 8)
rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix
# Reference pixel coordinates in image coordinate system (CS):
test_pix = np.append(img_pix, rnd_pix, axis=0)
# Reference pixel coordinates in sky CS using forward transformation:
all_world = w.all_pix2world(test_pix, origin)
try:
runtime_begin = datetime.now()
# Apply the inverse iterative process to pixels in world coordinates
# to recover the pixel coordinates in image space.
all_pix = w.all_world2pix(
all_world, origin, tolerance=tolerance, adaptive=adaptive,
maxiter=maxiter, detect_divergence=detect_divergence)
runtime_end = datetime.now()
except wcs.wcs.NoConvergence as e:
runtime_end = datetime.now()
ndiv = 0
if e.divergent is not None:
ndiv = e.divergent.shape[0]
print(f"There are {ndiv} diverging solutions.")
print(f"Indices of diverging solutions:\n{e.divergent}")
print(f"Diverging solutions:\n{e.best_solution[e.divergent]}\n")
print("Mean radius of the diverging solutions: {}"
.format(np.mean(
np.linalg.norm(e.best_solution[e.divergent], axis=1))))
print("Mean accuracy of the diverging solutions: {}\n"
.format(np.mean(
np.linalg.norm(e.accuracy[e.divergent], axis=1))))
else:
print("There are no diverging solutions.")
nslow = 0
if e.slow_conv is not None:
nslow = e.slow_conv.shape[0]
print(f"There are {nslow} slowly converging solutions.")
print(f"Indices of slowly converging solutions:\n{e.slow_conv}")
print(f"Slowly converging solutions:\n{e.best_solution[e.slow_conv]}\n")
else:
print("There are no slowly converging solutions.\n")
print("There are {} converged solutions."
.format(e.best_solution.shape[0] - ndiv - nslow))
print(f"Best solutions (all points):\n{e.best_solution}")
print(f"Accuracy:\n{e.accuracy}\n")
print("\nFinished running 'test_all_world2pix' with errors.\n"
"ERROR: {}\nRun time: {}\n"
.format(e.args[0], runtime_end - runtime_begin))
raise e
# Compute differences between reference pixel coordinates and
# pixel coordinates (in image space) recovered from reference
# pixels in world coordinates:
errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
meanerr = np.mean(errors)
maxerr = np.amax(errors)
print("\nFinished running 'test_all_world2pix'.\n"
"Mean error = {:e} (Max error = {:e})\n"
"Run time: {}\n"
.format(meanerr, maxerr, runtime_end - runtime_begin))
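    # Require the worst-case positional error to stay within twice the
    # requested per-point tolerance; the factor of 2 presumably leaves
    # headroom over the solver's convergence criterion.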
    assert maxerr < 2.0 * tolerance
def test_scamp_sip_distortion_parameters():
"""
Test parsing of WCS parameters with redundant SIP and SCAMP distortion
parameters.
"""
header = get_pkg_data_contents('data/validate.fits', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(header)
# Just check that this doesn't raise an exception.
w.all_pix2world(0, 0, 0)
def test_fixes2():
"""
From github issue #1854
"""
header = get_pkg_data_contents(
'data/nonstandard_units.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
wcs.WCS(header, fix=False)
def test_unit_normalization():
"""
From github issue #1918
"""
header = get_pkg_data_contents(
'data/unit.hdr', encoding='binary')
w = wcs.WCS(header)
assert w.wcs.cunit[2] == 'm/s'
def test_footprint_to_file(tmpdir):
"""
From github issue #1912
"""
# Arbitrary keywords from real data
hdr = {'CTYPE1': 'RA---ZPN', 'CRUNIT1': 'deg',
'CRPIX1': -3.3495999e+02, 'CRVAL1': 3.185790700000e+02,
'CTYPE2': 'DEC--ZPN', 'CRUNIT2': 'deg',
'CRPIX2': 3.0453999e+03, 'CRVAL2': 4.388538000000e+01,
'PV2_1': 1., 'PV2_3': 220., 'NAXIS1': 2048, 'NAXIS2': 1024}
w = wcs.WCS(hdr)
testfile = str(tmpdir.join('test.txt'))
w.footprint_to_file(testfile)
with open(testfile) as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'ICRS\n'
assert 'color=green' in lines[3]
w.footprint_to_file(testfile, coordsys='FK5', color='red')
with open(testfile) as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'FK5\n'
assert 'color=red' in lines[3]
with pytest.raises(ValueError):
w.footprint_to_file(testfile, coordsys='FOO')
del hdr['NAXIS1']
del hdr['NAXIS2']
w = wcs.WCS(hdr)
with pytest.warns(AstropyUserWarning):
w.footprint_to_file(testfile)
# Ignore the FITSFixedWarning about keyrecords following the END keyrecord
# being ignored, which comes from src/astropy_wcs.c. Only a blind catch like
# this seems to work when pytest warnings are turned into exceptions.
@pytest.mark.filterwarnings('ignore')
def test_validate_faulty_wcs():
"""
From github issue #2053
"""
h = fits.Header()
# Illegal WCS:
h['RADESYSA'] = 'ICRS'
h['PV2_1'] = 1.0
hdu = fits.PrimaryHDU([[0]], header=h)
hdulist = fits.HDUList([hdu])
# Check that this doesn't raise a NameError exception
wcs.validate(hdulist)
def test_error_message():
header = get_pkg_data_contents(
'data/invalid_header.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
# Both lines are in here, because 0.4 calls .set within WCS.__init__,
# whereas 0.3 and earlier did not.
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(header, _do_set=False)
w.all_pix2world([[536.0, 894.0]], 0)
def test_out_of_bounds():
# See #2107
header = get_pkg_data_contents('data/zpn-hole.hdr', encoding='binary')
w = wcs.WCS(header)
ra, dec = w.wcs_pix2world(110, 110, 0)
assert np.isnan(ra)
assert np.isnan(dec)
ra, dec = w.wcs_pix2world(0, 0, 0)
assert not np.isnan(ra)
assert not np.isnan(dec)
def test_calc_footprint_1():
    fits_name = get_pkg_data_filename('data/sip.fits')
    with pytest.warns(wcs.FITSFixedWarning):
        w = wcs.WCS(fits_name)
axes = (1000, 1051)
ref = np.array([[202.39314493, 47.17753352],
[202.71885939, 46.94630488],
[202.94631893, 47.15855022],
[202.72053428, 47.37893142]])
footprint = w.calc_footprint(axes=axes)
assert_allclose(footprint, ref)
def test_calc_footprint_2():
""" Test calc_footprint without distortion. """
    fits_name = get_pkg_data_filename('data/sip.fits')
    with pytest.warns(wcs.FITSFixedWarning):
        w = wcs.WCS(fits_name)
axes = (1000, 1051)
ref = np.array([[202.39265216, 47.17756518],
[202.7469062, 46.91483312],
[203.11487481, 47.14359319],
[202.76092671, 47.40745948]])
footprint = w.calc_footprint(axes=axes, undistort=False)
assert_allclose(footprint, ref)
def test_calc_footprint_3():
""" Test calc_footprint with corner of the pixel."""
w = wcs.WCS()
w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
w.wcs.crpix = [1.5, 5.5]
w.wcs.cdelt = [-0.1, 0.1]
axes = (2, 10)
ref = np.array([[0.1, -0.5],
[0.1, 0.5],
[359.9, 0.5],
[359.9, -0.5]])
footprint = w.calc_footprint(axes=axes, undistort=False, center=False)
assert_allclose(footprint, ref)
def test_sip():
# See #2107
header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')
w = wcs.WCS(header)
x0, y0 = w.sip_pix2foc(200, 200, 0)
assert_allclose(72, x0, 1e-3)
assert_allclose(72, y0, 1e-3)
x1, y1 = w.sip_foc2pix(x0, y0, 0)
assert_allclose(200, x1, 1e-3)
assert_allclose(200, y1, 1e-3)
def test_sub_3d_with_sip():
# See #10527
header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')
header = fits.Header.fromstring(header)
header['NAXIS'] = 3
header.set('NAXIS3', 64, after=header.index('NAXIS2'))
w = wcs.WCS(header, naxis=2)
assert w.naxis == 2
def test_printwcs(capsys):
"""
Just make sure that it runs
"""
h = get_pkg_data_contents(
'data/spectra/orion-freq-1.hdr', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert 'WCS Keywords' in captured.out
h = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert 'WCS Keywords' in captured.out
def test_invalid_spherical():
header = """
SIMPLE = T / conforms to FITS standard
BITPIX = 8 / array data type
WCSAXES = 2 / no comment
CTYPE1 = 'RA---TAN' / TAN (gnomic) projection
CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection
EQUINOX = 2000.0 / Equatorial coordinates definition (yr)
LONPOLE = 180.0 / no comment
LATPOLE = 0.0 / no comment
CRVAL1 = 16.0531567459 / RA of reference point
CRVAL2 = 23.1148929108 / DEC of reference point
CRPIX1 = 2129 / X reference pixel
CRPIX2 = 1417 / Y reference pixel
CUNIT1 = 'deg ' / X pixel scale units
CUNIT2 = 'deg ' / Y pixel scale units
CD1_1 = -0.00912247310646 / Transformation matrix
CD1_2 = -0.00250608809647 / no comment
CD2_1 = 0.00250608809647 / no comment
CD2_2 = -0.00912247310646 / no comment
IMAGEW = 4256 / Image width, in pixels.
IMAGEH = 2832 / Image height, in pixels.
"""
f = io.StringIO(header)
header = fits.Header.fromtextfile(f)
w = wcs.WCS(header)
x, y = w.wcs_world2pix(211, -26, 0)
assert np.isnan(x) and np.isnan(y)
def test_no_iteration():
# Regression test for #3066
w = wcs.WCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'WCS' object is not iterable"
class NewWCS(wcs.WCS):
pass
w = NewWCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'NewWCS' object is not iterable"
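# SIP and TPV are two different FITS encodings of polynomial distortion; for
# headers describing the same solution, transforming the reference pixel
# through either representation (including after a to_header() round trip)
# should yield matching sky coordinates.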
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_sip_tpv_agreement():
sip_header = get_pkg_data_contents(
os.path.join("data", "siponly.hdr"), encoding='binary')
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w_sip = wcs.WCS(sip_header)
w_tpv = wcs.WCS(tpv_header)
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv.all_pix2world([w_tpv.wcs.crpix], 1))
w_sip2 = wcs.WCS(w_sip.to_header())
w_tpv2 = wcs.WCS(w_tpv.to_header())
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_sip2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_tpv.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1))
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_tpv_copy():
# See #3904
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w_tpv = wcs.WCS(tpv_header)
ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)
assert ra[0] != ra[1] and ra[1] != ra[2]
assert dec[0] != dec[1] and dec[1] != dec[2]
def test_hst_wcs():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
with fits.open(path) as hdulist:
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist)
# Check pixel scale and area
assert_quantity_allclose(
w.proj_plane_pixel_scales(), [1.38484378e-05, 1.39758488e-05] * u.deg)
assert_quantity_allclose(
w.proj_plane_pixel_area(), 1.93085492e-10 * (u.deg * u.deg))
# Exercise the main transformation functions, mainly just for
# coverage
w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0)
w.det2im([0, 100, 200], [0, -100, 200], 0)
w.cpdis1 = w.cpdis1
w.cpdis2 = w.cpdis2
w.det2im1 = w.det2im1
w.det2im2 = w.det2im2
w.sip = w.sip
w.cpdis1.cdelt = w.cpdis1.cdelt
w.cpdis1.crpix = w.cpdis1.crpix
w.cpdis1.crval = w.cpdis1.crval
w.cpdis1.data = w.cpdis1.data
assert w.sip.a_order == 4
assert w.sip.b_order == 4
assert w.sip.ap_order == 0
assert w.sip.bp_order == 0
assert_array_equal(w.sip.crpix, [2048., 1024.])
wcs.WCS(hdulist[1].header, hdulist)
def test_cpdis_comments():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
f = fits.open(path)
w = wcs.WCS(f[1].header, f)
hdr = w.to_fits()[0].header
f.close()
wcscards = list(hdr['CPDIS*'].cards) + list(hdr['DP*'].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
('CPDIS1', 'LOOKUP', 'Prior distortion function type'),
('DP1.EXTVER', 1.0, 'Version number of WCSDVARR extension'),
('DP1.NAXES', 2.0, 'Number of independent variables in CPDIS function'),
('DP1.AXIS.1', 1.0, 'Axis number of the 1st variable in a CPDIS function'),
('DP1.AXIS.2', 2.0, 'Axis number of the 2nd variable in a CPDIS function'),
('CPDIS2', 'LOOKUP', 'Prior distortion function type'),
('DP2.EXTVER', 2.0, 'Version number of WCSDVARR extension'),
('DP2.NAXES', 2.0, 'Number of independent variables in CPDIS function'),
('DP2.AXIS.1', 1.0, 'Axis number of the 1st variable in a CPDIS function'),
('DP2.AXIS.2', 2.0, 'Axis number of the 2nd variable in a CPDIS function'),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_d2im_comments():
path = get_pkg_data_filename("data/ie6d07ujq_wcs.fits")
f = fits.open(path)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header, f)
f.close()
wcscards = list(w.to_fits()[0].header['D2IM*'].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
('D2IMDIS1', 'LOOKUP', 'Detector to image correction type'),
('D2IM1.EXTVER', 1.0, 'Version number of WCSDVARR extension'),
('D2IM1.NAXES', 2.0, 'Number of independent variables in D2IM function'),
('D2IM1.AXIS.1', 1.0, 'Axis number of the 1st variable in a D2IM function'),
('D2IM1.AXIS.2', 2.0, 'Axis number of the 2nd variable in a D2IM function'),
('D2IMDIS2', 'LOOKUP', 'Detector to image correction type'),
('D2IM2.EXTVER', 2.0, 'Version number of WCSDVARR extension'),
('D2IM2.NAXES', 2.0, 'Number of independent variables in D2IM function'),
('D2IM2.AXIS.1', 1.0, 'Axis number of the 1st variable in a D2IM function'),
('D2IM2.AXIS.2', 2.0, 'Axis number of the 2nd variable in a D2IM function'),
# ('D2IMERR1', 0.049, 'Maximum error of D2IM correction for axis 1'),
# ('D2IMERR2', 0.035, 'Maximum error of D2IM correction for axis 2'),
# ('D2IMEXT', 'iref$y7b1516hi_d2i.fits', ''),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_sip_broken():
# This header caused wcslib to segfault because it has a SIP
# specification in a non-default keyword
hdr = get_pkg_data_contents("data/sip-broken.hdr")
wcs.WCS(hdr)
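# The next group of regression tests checks that to_header() writes CRVAL and
# CDELT with enough decimal digits that no precision is lost on a round trip;
# test_no_truncate_crval_p17 additionally exercises the WCSHDO_P17 flag, which
# asks wcslib for 17 significant digits.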
def test_no_truncate_crval():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f'CRVAL{ii + 1}'] == w.wcs.crval[ii]
assert header[f'CDELT{ii + 1}'] == w.wcs.cdelt[ii]
def test_no_truncate_crval_try2():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-5, 1e-5, 1e5]
w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ']
w.wcs.cunit = ['deg', 'deg', 'Hz']
w.wcs.crpix = [1, 1, 1]
w.wcs.restfrq = 2.34e11
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f'CRVAL{ii + 1}'] == w.wcs.crval[ii]
assert header[f'CDELT{ii + 1}'] == w.wcs.cdelt[ii]
def test_no_truncate_crval_p17():
"""
Regression test for https://github.com/astropy/astropy/issues/5162
"""
w = wcs.WCS(naxis=2)
w.wcs.crval = [50.1234567890123456, 50.1234567890123456]
w.wcs.cdelt = [1e-3, 1e-3]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.set()
header = w.to_header()
assert header['CRVAL1'] != w.wcs.crval[0]
assert header['CRVAL2'] != w.wcs.crval[1]
header = w.to_header(relax=wcs.WCSHDO_P17)
assert header['CRVAL1'] == w.wcs.crval[0]
assert header['CRVAL2'] == w.wcs.crval[1]
def test_no_truncate_using_compare():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
This one uses WCS.wcs.compare and some slightly different values
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [2.409303333333E+02, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
w2 = wcs.WCS(w.to_header())
w.wcs.compare(w2.wcs)
def test_passing_ImageHDU():
"""
Passing ImageHDU or PrimaryHDU and comparing it with
wcs initialized from header. For #4493.
"""
path = get_pkg_data_filename('data/validate.fits')
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
wcs_hdu = wcs.WCS(hdulist[0])
wcs_header = wcs.WCS(hdulist[0].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
wcs_hdu = wcs.WCS(hdulist[1])
wcs_header = wcs.WCS(hdulist[1].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
def test_inconsistent_sip():
"""
Test for #4814
"""
hdr = get_pkg_data_contents("data/sip-broken.hdr")
ctx = ctx_for_v71_dateref_warnings()
with ctx:
w = wcs.WCS(hdr)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(relax=None)
# CTYPE should not include "-SIP" if relax is None
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(relax=False)
assert 'A_0_2' not in newhdr
# CTYPE should not include "-SIP" if relax is False
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key="C")
assert 'A_0_2' not in newhdr
# Test writing header with a different key
with ctx:
wnew = wcs.WCS(newhdr, key='C')
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key=" ")
# Test writing a primary WCS to header
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
# Test that "-SIP" is kept into CTYPE if relax=True and
# "-SIP" was in the original header
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
assert 'A_0_2' in newhdr
# Test that SIP coefficients are also written out.
assert wnew.sip is not None
# ######### broken header ###########
# Test that "-SIP" is added to CTYPE if relax=True and
# "-SIP" was not in the original header but SIP coefficients
# are present.
with ctx:
w = wcs.WCS(hdr)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
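# With bounds checking disabled, wcslib no longer flags out-of-range results
# as invalid (NaN); here a pixel far outside the valid plate-carree region
# comes back as an un-normalized longitude of -180 deg instead.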
def test_bounds_check():
"""Test for #4957"""
w = wcs.WCS(naxis=2)
w.wcs.ctype = ["RA---CAR", "DEC--CAR"]
w.wcs.cdelt = [10, 10]
w.wcs.crval = [-90, 90]
w.wcs.crpix = [1, 1]
w.wcs.bounds_check(False, False)
ra, dec = w.wcs_pix2world(300, 0, 0)
assert_allclose(ra, -180)
assert_allclose(dec, -30)
def test_naxis():
w = wcs.WCS(naxis=2)
w.wcs.crval = [1, 1]
w.wcs.cdelt = [0.1, 0.1]
w.wcs.crpix = [1, 1]
w._naxis = [1000, 500]
assert w.pixel_shape == (1000, 500)
assert w.array_shape == (500, 1000)
w.pixel_shape = (99, 59)
assert w._naxis == [99, 59]
w.array_shape = (45, 23)
assert w._naxis == [23, 45]
assert w.pixel_shape == (23, 45)
w.pixel_shape = None
assert w.pixel_bounds is None
def test_sip_with_altkey():
"""
Test that when creating a WCS object using a key, CTYPE with
that key is looked at and not the primary CTYPE.
fix for #5443.
"""
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
# create a header with two WCSs.
h1 = w.to_header(relax=True, key='A')
h2 = w.to_header(relax=False)
h1['CTYPE1A'] = "RA---SIN-SIP"
h1['CTYPE2A'] = "DEC--SIN-SIP"
h1.update(h2)
with ctx_for_v71_dateref_warnings():
w = wcs.WCS(h1, key='A')
assert (w.wcs.ctype == np.array(['RA---SIN-SIP', 'DEC--SIN-SIP'])).all()
def test_to_fits_1():
"""
Test to_fits() with LookupTable distortion.
"""
fits_name = get_pkg_data_filename('data/dist.fits')
with pytest.warns(AstropyDeprecationWarning):
w = wcs.WCS(fits_name)
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert isinstance(wfits[1], fits.ImageHDU)
def test_keyedsip():
"""
Test sip reading with extra key.
"""
hdr_name = get_pkg_data_filename('data/sip-broken.hdr')
header = fits.Header.fromfile(hdr_name)
del header["CRPIX1"]
del header["CRPIX2"]
w = wcs.WCS(header=header, key="A")
assert isinstance(w.sip, wcs.Sip)
assert w.sip.crpix[0] == 2048
assert w.sip.crpix[1] == 1026
def test_zero_size_input():
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
inp = np.zeros((0, 2))
assert_array_equal(inp, w.all_pix2world(inp, 0))
assert_array_equal(inp, w.all_world2pix(inp, 0))
inp = [], [1]
result = w.all_pix2world([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
result = w.all_world2pix([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
def test_scalar_inputs():
"""
Issue #7845
"""
wcsobj = wcs.WCS(naxis=1)
result = wcsobj.all_pix2world(2, 1)
assert_array_equal(result, [np.array(2.)])
assert result[0].shape == ()
result = wcsobj.all_pix2world([2], 1)
assert_array_equal(result, [np.array([2.])])
assert result[0].shape == (1,)
# Ignore RuntimeWarning raised on s390.
@pytest.mark.filterwarnings('ignore:.*invalid value encountered in.*')
def test_footprint_contains():
"""
Test WCS.footprint_contains(skycoord)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
""" # noqa
header = fits.Header.fromstring(header.strip(), '\n')
test_wcs = wcs.WCS(header)
hasCoord = test_wcs.footprint_contains(SkyCoord(254, 2, unit='deg'))
assert hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(240, 2, unit='deg'))
assert not hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(24, 2, unit='deg'))
assert not hasCoord
def test_cunit():
# Initializing WCS
w1 = wcs.WCS(naxis=2)
w2 = wcs.WCS(naxis=2)
w3 = wcs.WCS(naxis=2)
w4 = wcs.WCS(naxis=2)
# Initializing the values of cunit
w1.wcs.cunit = ['deg', 'm/s']
w2.wcs.cunit = ['km/h', 'km/h']
w3.wcs.cunit = ['deg', 'm/s']
w4.wcs.cunit = ['deg', 'deg']
# Equality checking a cunit with itself
assert w1.wcs.cunit == w1.wcs.cunit
assert not w1.wcs.cunit != w1.wcs.cunit
# Equality checking of two different cunit object having same values
assert w1.wcs.cunit == w3.wcs.cunit
assert not w1.wcs.cunit != w3.wcs.cunit
# Equality checking of two different cunit object having the same first unit
# but different second unit (see #9154)
assert not w1.wcs.cunit == w4.wcs.cunit
assert w1.wcs.cunit != w4.wcs.cunit
# Inequality checking of two different cunit object having different values
assert not w1.wcs.cunit == w2.wcs.cunit
assert w1.wcs.cunit != w2.wcs.cunit
# Inequality checking of cunit with a list of literals
assert not w1.wcs.cunit == [1, 2, 3]
assert w1.wcs.cunit != [1, 2, 3]
# Inequality checking with some characters
assert not w1.wcs.cunit == ['a', 'b', 'c']
assert w1.wcs.cunit != ['a', 'b', 'c']
# Comparison is not implemented TypeError will raise
with pytest.raises(TypeError):
w1.wcs.cunit < w2.wcs.cunit
class TestWcsWithTime:
def setup(self):
if _WCSLIB_VER >= Version('7.1'):
fname = get_pkg_data_filename('data/header_with_time_wcslib71.fits')
else:
fname = get_pkg_data_filename('data/header_with_time.fits')
self.header = fits.Header.fromfile(fname)
with pytest.warns(wcs.FITSFixedWarning):
self.w = wcs.WCS(self.header, key='A')
    def test_keywords2wcsprm(self):
        """Make sure Wcsprm is populated correctly from the header."""
ctype = [self.header[val] for val in self.header["CTYPE*"]]
crval = [self.header[val] for val in self.header["CRVAL*"]]
crpix = [self.header[val] for val in self.header["CRPIX*"]]
cdelt = [self.header[val] for val in self.header["CDELT*"]]
cunit = [self.header[val] for val in self.header["CUNIT*"]]
assert list(self.w.wcs.ctype) == ctype
time_axis_code = 4000 if _WCSLIB_VER >= Version('7.9') else 0
assert list(self.w.wcs.axis_types) == [2200, 2201, 3300, time_axis_code]
assert_allclose(self.w.wcs.crval, crval)
assert_allclose(self.w.wcs.crpix, crpix)
assert_allclose(self.w.wcs.cdelt, cdelt)
assert list(self.w.wcs.cunit) == cunit
naxis = self.w.naxis
assert naxis == 4
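        # Reconstruct the expected PC matrix for the 'A' WCS: any PCi_jA card
        # missing from the header defaults to the identity pattern (1 on the
        # diagonal, 0 elsewhere).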
pc = np.zeros((naxis, naxis), dtype=np.float64)
for i in range(1, 5):
for j in range(1, 5):
if i == j:
pc[i-1, j-1] = self.header.get(f'PC{i}_{j}A', 1)
else:
pc[i-1, j-1] = self.header.get(f'PC{i}_{j}A', 0)
assert_allclose(self.w.wcs.pc, pc)
char_keys = ['timesys', 'trefpos', 'trefdir', 'plephem', 'timeunit',
'dateref', 'dateobs', 'datebeg', 'dateavg', 'dateend']
for key in char_keys:
assert getattr(self.w.wcs, key) == self.header.get(key, "")
num_keys = ['mjdref', 'mjdobs', 'mjdbeg', 'mjdend',
'jepoch', 'bepoch', 'tstart', 'tstop', 'xposure',
'timsyer', 'timrder', 'timedel', 'timepixr',
'timeoffs', 'telapse', 'czphs', 'cperi']
for key in num_keys:
if key.upper() == 'MJDREF':
hdrv = [self.header.get('MJDREFIA', np.nan),
self.header.get('MJDREFFA', np.nan)]
else:
hdrv = self.header.get(key, np.nan)
assert_allclose(getattr(self.w.wcs, key), hdrv)
def test_transforms(self):
assert_allclose(self.w.all_pix2world(*self.w.wcs.crpix, 1),
self.w.wcs.crval)
def test_invalid_coordinate_masking():
# Regression test for an issue which caused all coordinates to be set to NaN
# after a transformation rather than just the invalid ones as reported by
# WCSLIB. A specific example of this is that when considering an all-sky
# spectral cube with a spectral axis that is not correlated with the sky
# axes, if transforming pixel coordinates that did not fall 'in' the sky,
# the spectral world value was also masked even though that coordinate
# was valid.
w = wcs.WCS(naxis=3)
w.wcs.ctype = 'VELO_LSR', 'GLON-CAR', 'GLAT-CAR'
w.wcs.crval = -20, 0, 0
w.wcs.crpix = 1, 1441, 241
w.wcs.cdelt = 1.3, -0.125, 0.125
px = [-10, -10, 20]
py = [-10, 10, 20]
pz = [-10, 10, 20]
wx, wy, wz = w.wcs_pix2world(px, py, pz, 0)
# Before fixing this, wx used to return np.nan for the first element
assert_allclose(wx, [-33, -33, 6])
assert_allclose(wy, [np.nan, 178.75, 177.5])
assert_allclose(wz, [np.nan, -28.75, -27.5])
def test_no_pixel_area():
w = wcs.WCS(naxis=3)
# Pixel area cannot be computed
with pytest.raises(ValueError, match='Pixel area is defined only for 2D pixels'):
w.proj_plane_pixel_area()
# Pixel scales still possible
assert_quantity_allclose(w.proj_plane_pixel_scales(), 1)
def test_distortion_header(tmpdir):
"""
Test that plate distortion model is correctly described by `wcs.to_header()`
and preserved when creating a Cutout2D from the image, writing it to FITS,
and reading it back from the file.
"""
path = get_pkg_data_filename("data/dss.14.29.56-62.41.05.fits.gz")
cen = np.array((50, 50))
siz = np.array((20, 20))
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdulist[0].header)
cut = Cutout2D(hdulist[0].data, position=cen, size=siz, wcs=w)
# This converts the DSS plate solution model with AMD[XY]n coefficients into a
# Template Polynomial Distortion model (TPD.FWD.n coefficients);
# not testing explicitly for the header keywords here.
if _WCSLIB_VER < Version("7.4"):
with pytest.warns(AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"):
w0 = wcs.WCS(w.to_header_string())
with pytest.warns(AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"):
w1 = wcs.WCS(cut.wcs.to_header_string())
if _WCSLIB_VER >= Version("7.1"):
pytest.xfail("TPD coefficients incomplete with WCSLIB >= 7.1 < 7.4")
else:
w0 = wcs.WCS(w.to_header_string())
w1 = wcs.WCS(cut.wcs.to_header_string())
assert w.pixel_to_world(0, 0).separation(w0.pixel_to_world(0, 0)) < 1.e-3 * u.mas
assert w.pixel_to_world(*cen).separation(w0.pixel_to_world(*cen)) < 1.e-3 * u.mas
assert w.pixel_to_world(*cen).separation(w1.pixel_to_world(*(siz / 2))) < 1.e-3 * u.mas
cutfile = str(tmpdir.join('cutout.fits'))
fits.writeto(cutfile, cut.data, cut.wcs.to_header())
with fits.open(cutfile) as hdulist:
w2 = wcs.WCS(hdulist[0].header)
assert w.pixel_to_world(*cen).separation(w2.pixel_to_world(*(siz / 2))) < 1.e-3 * u.mas
def test_pixlist_wcs_colsel():
"""
Test selection of a specific pixel list WCS using ``colsel``. See #11412.
"""
hdr_file = get_pkg_data_filename('data/chandra-pixlist-wcs.hdr')
hdr = fits.Header.fromtextfile(hdr_file)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdr, keysel=['image', 'pixel'], colsel=[11, 12])
assert w.naxis == 2
assert list(w.wcs.ctype) == ['RA---TAN', 'DEC--TAN']
assert np.allclose(w.wcs.crval, [229.38051931869, -58.81108068885])
assert np.allclose(w.wcs.pc, [[1, 0], [0, 1]])
assert np.allclose(w.wcs.cdelt, [-0.00013666666666666, 0.00013666666666666])
assert np.allclose(w.wcs.lonpole, 180.)
@pytest.mark.skipif(
_WCSLIB_VER < Version('7.8'),
reason="TIME axis extraction only works with wcslib 7.8 or later"
)
def test_time_axis_selection():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'TIME']
w.wcs.set()
assert list(w.sub([wcs.WCSSUB_TIME]).wcs.ctype) == ['TIME']
assert (w.wcs_pix2world([[1, 2, 3]], 0)[0, 2] ==
w.sub([wcs.WCSSUB_TIME]).wcs_pix2world([[3]], 0)[0, 0])
@pytest.mark.skipif(
_WCSLIB_VER < Version('7.8'),
reason="TIME axis extraction only works with wcslib 7.8 or later"
)
def test_temporal():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'TIME']
w.wcs.set()
assert w.has_temporal
assert w.sub([wcs.WCSSUB_TIME]).is_temporal
assert (w.wcs_pix2world([[1, 2, 3]], 0)[0, 2] ==
w.temporal.wcs_pix2world([[3]], 0)[0, 0])
def test_swapaxes_same_val_roundtrip():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
w.wcs.crpix = [32.5, 16.5, 1.]
w.wcs.crval = [5.63, -72.05, 1.]
w.wcs.pc = [[5.9e-06, 1.3e-05, 0.0], [-1.2e-05, 5.0e-06, 0.0], [0.0, 0.0, 1.0]]
w.wcs.cdelt = [1.0, 1.0, 1.0]
w.wcs.set()
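    # sub() with one-based axis numbers [3, 2, 1] returns a copy of the WCS
    # with the axes reversed; axes_order0 holds the corresponding zero-based
    # indices used to permute the pixel coordinates and expected world values.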
axes_order = [3, 2, 1]
axes_order0 = list(i - 1 for i in axes_order)
ws = w.sub(axes_order)
imcoord = np.array([3, 5, 7])
imcoords = imcoord[axes_order0]
val_ref = w.wcs_pix2world([imcoord], 0)[0]
val_swapped = ws.wcs_pix2world([imcoords], 0)[0]
# check original axis and swapped give same results
assert np.allclose(val_ref[axes_order0], val_swapped, rtol=0, atol=1e-8)
# check round-tripping:
assert np.allclose(w.wcs_world2pix([val_ref], 0)[0], imcoord, rtol=0, atol=1e-8)
|
3ddedd7ee830a2398eb1b755021a6d82f89931a5bec183f75b46f4b5764fb503 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Tests for the auxiliary parameters contained in wcsaux
from numpy.testing import assert_allclose
from astropy.io import fits
from astropy.wcs import WCS
STR_EXPECTED_EMPTY = """
rsun_ref:
dsun_obs:
crln_obs:
hgln_obs:
hglt_obs:""".lstrip()
def test_empty():
w = WCS(naxis=1)
assert w.wcs.aux.rsun_ref is None
assert w.wcs.aux.dsun_obs is None
assert w.wcs.aux.crln_obs is None
assert w.wcs.aux.hgln_obs is None
assert w.wcs.aux.hglt_obs is None
assert str(w.wcs.aux) == STR_EXPECTED_EMPTY
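# A helioprojective (HPLN/HPLT) header, in the style of an SDO solar image,
# carrying the auxiliary solar keywords (RSUN_REF, DSUN_OBS, CRLN_OBS,
# HGLN_OBS, HGLT_OBS) that wcslib exposes through Wcsprm.aux.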
HEADER_SOLAR = fits.Header.fromstring("""
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 64.5 / Pixel coordinate of reference point
CRPIX2 = 64.5 / Pixel coordinate of reference point
PC1_1 = 0.99999994260024 / Coordinate transformation matrix element
PC1_2 = -0.00033882076120692 / Coordinate transformation matrix element
PC2_1 = 0.00033882076120692 / Coordinate transformation matrix element
PC2_2 = 0.99999994260024 / Coordinate transformation matrix element
CDELT1 = 0.0053287911111111 / [deg] Coordinate increment at reference point
CDELT2 = 0.0053287911111111 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'HPLN-TAN' / Coordinate type code: gnomonic projection
CTYPE2 = 'HPLT-TAN' / Coordinate type code: gnomonic projection
CRVAL1 = -0.0012589367249586 / [deg] Coordinate value at reference point
CRVAL2 = 0.00079599300143911 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.00079599300143911 / [deg] Native latitude of celestial pole
DATE-OBS= '2011-02-15T00:00:00.34' / ISO-8601 time of observation
MJD-OBS = 55607.000003935 / [d] MJD at start of observation
RSUN_REF= 696000000.0 / [m] Solar radius
DSUN_OBS= 147724815128.0 / [m] Distance from centre of Sun to observer
CRLN_OBS= 22.814522 / [deg] Carrington heliographic lng of observer
CRLT_OBS= -6.820544 / [deg] Heliographic latitude of observer
HGLN_OBS= 8.431123 / [deg] Stonyhurst heliographic lng of observer
HGLT_OBS= -6.820544 / [deg] Heliographic latitude of observer
""".lstrip(), sep='\n')
STR_EXPECTED_GET = """
rsun_ref: 696000000.000000
dsun_obs: 147724815128.000000
crln_obs: 22.814522
hgln_obs: 8.431123
hglt_obs: -6.820544""".lstrip()
def test_solar_aux_get():
w = WCS(HEADER_SOLAR)
assert_allclose(w.wcs.aux.rsun_ref, 696000000)
assert_allclose(w.wcs.aux.dsun_obs, 147724815128)
assert_allclose(w.wcs.aux.crln_obs, 22.814522)
assert_allclose(w.wcs.aux.hgln_obs, 8.431123)
assert_allclose(w.wcs.aux.hglt_obs, -6.820544)
assert str(w.wcs.aux) == STR_EXPECTED_GET
STR_EXPECTED_SET = """
rsun_ref: 698000000.000000
dsun_obs: 140000000000.000000
crln_obs: 10.000000
hgln_obs: 30.000000
hglt_obs: 40.000000""".lstrip()
def test_solar_aux_set():
w = WCS(HEADER_SOLAR)
w.wcs.aux.rsun_ref = 698000000
assert_allclose(w.wcs.aux.rsun_ref, 698000000)
w.wcs.aux.dsun_obs = 140000000000
assert_allclose(w.wcs.aux.dsun_obs, 140000000000)
w.wcs.aux.crln_obs = 10.
assert_allclose(w.wcs.aux.crln_obs, 10.)
w.wcs.aux.hgln_obs = 30.
assert_allclose(w.wcs.aux.hgln_obs, 30.)
w.wcs.aux.hglt_obs = 40.
assert_allclose(w.wcs.aux.hglt_obs, 40.)
assert str(w.wcs.aux) == STR_EXPECTED_SET
header = w.to_header()
assert_allclose(header['RSUN_REF'], 698000000)
assert_allclose(header['DSUN_OBS'], 140000000000)
assert_allclose(header['CRLN_OBS'], 10.)
assert_allclose(header['HGLN_OBS'], 30.)
assert_allclose(header['HGLT_OBS'], 40.)
def test_set_aux_on_empty():
w = WCS(naxis=2)
w.wcs.aux.rsun_ref = 698000000
assert_allclose(w.wcs.aux.rsun_ref, 698000000)
w.wcs.aux.dsun_obs = 140000000000
assert_allclose(w.wcs.aux.dsun_obs, 140000000000)
w.wcs.aux.crln_obs = 10.
assert_allclose(w.wcs.aux.crln_obs, 10.)
w.wcs.aux.hgln_obs = 30.
assert_allclose(w.wcs.aux.hgln_obs, 30.)
w.wcs.aux.hglt_obs = 40.
assert_allclose(w.wcs.aux.hglt_obs, 40.)
assert str(w.wcs.aux) == STR_EXPECTED_SET
header = w.to_header()
assert_allclose(header['RSUN_REF'], 698000000)
assert_allclose(header['DSUN_OBS'], 140000000000)
assert_allclose(header['CRLN_OBS'], 10.)
assert_allclose(header['HGLN_OBS'], 30.)
assert_allclose(header['HGLT_OBS'], 40.)
def test_unset_aux():
w = WCS(HEADER_SOLAR)
assert w.wcs.aux.rsun_ref is not None
w.wcs.aux.rsun_ref = None
assert w.wcs.aux.rsun_ref is None
assert w.wcs.aux.dsun_obs is not None
w.wcs.aux.dsun_obs = None
assert w.wcs.aux.dsun_obs is None
assert w.wcs.aux.crln_obs is not None
w.wcs.aux.crln_obs = None
assert w.wcs.aux.crln_obs is None
assert w.wcs.aux.hgln_obs is not None
w.wcs.aux.hgln_obs = None
assert w.wcs.aux.hgln_obs is None
assert w.wcs.aux.hglt_obs is not None
w.wcs.aux.hglt_obs = None
assert w.wcs.aux.hglt_obs is None
assert str(w.wcs.aux) == 'rsun_ref:\ndsun_obs:\ncrln_obs:\nhgln_obs:\nhglt_obs:'
header = w.to_header()
assert 'RSUN_REF' not in header
assert 'DSUN_OBS' not in header
assert 'CRLN_OBS' not in header
assert 'HGLN_OBS' not in header
assert 'HGLT_OBS' not in header
|
a63d81c09307399eaef393af9f68ab392602b3b4366ee882866d9b0e046000af | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import locale
import re
from packaging.version import Version
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from astropy.io import fits
from astropy.wcs import wcs
from astropy.wcs import _wcs
from astropy.wcs.wcs import FITSFixedWarning
from astropy.utils.data import (
get_pkg_data_contents, get_pkg_data_fileobj, get_pkg_data_filename)
from astropy.utils.misc import _set_locale
from astropy import units as u
from astropy.units.core import UnitsWarning
######################################################################
def test_alt():
w = _wcs.Wcsprm()
assert w.alt == " "
w.alt = "X"
assert w.alt == "X"
del w.alt
assert w.alt == " "
def test_alt_invalid1():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.alt = "$"
def test_alt_invalid2():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.alt = " "
def test_axis_types():
w = _wcs.Wcsprm()
assert_array_equal(w.axis_types, [0, 0])
def test_cd():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.cd.dtype == float
assert w.has_cd() is True
assert_array_equal(w.cd, [[1, 0], [0, 1]])
del w.cd
assert w.has_cd() is False
def test_cd_missing():
w = _wcs.Wcsprm()
assert w.has_cd() is False
with pytest.raises(AttributeError):
w.cd
def test_cd_missing2():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.has_cd() is True
del w.cd
assert w.has_cd() is False
with pytest.raises(AttributeError):
w.cd
def test_cd_invalid():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.cd = [1, 0, 0, 1]
def test_cdfix():
w = _wcs.Wcsprm()
w.cdfix()
def test_cdelt():
w = _wcs.Wcsprm()
assert_array_equal(w.cdelt, [1, 1])
w.cdelt = [42, 54]
assert_array_equal(w.cdelt, [42, 54])
def test_cdelt_delete():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
del w.cdelt
def test_cel_offset():
w = _wcs.Wcsprm()
assert w.cel_offset is False
w.cel_offset = 'foo'
assert w.cel_offset is True
w.cel_offset = 0
assert w.cel_offset is False
def test_celfix():
# TODO: We need some data with -NCP or -GLS projections to test
# with. For now, this is just a smoke test
w = _wcs.Wcsprm()
assert w.celfix() == -1
def test_cname():
w = _wcs.Wcsprm()
# Test that this works as an iterator
for x in w.cname:
assert x == ''
assert list(w.cname) == ['', '']
w.cname = [b'foo', 'bar']
assert list(w.cname) == ['foo', 'bar']
def test_cname_invalid():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
w.cname = [42, 54]
def test_colax():
w = _wcs.Wcsprm()
assert w.colax.dtype == np.intc
assert_array_equal(w.colax, [0, 0])
w.colax = [42, 54]
assert_array_equal(w.colax, [42, 54])
w.colax[0] = 0
assert_array_equal(w.colax, [0, 54])
with pytest.raises(ValueError):
w.colax = [1, 2, 3]
def test_colnum():
w = _wcs.Wcsprm()
assert w.colnum == 0
w.colnum = 42
assert w.colnum == 42
with pytest.raises(OverflowError):
w.colnum = 0xffffffffffffffffffff
with pytest.raises(OverflowError):
w.colnum = 0xffffffff
with pytest.raises(TypeError):
del w.colnum
def test_colnum_invalid():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
w.colnum = 'foo'
def test_crder():
w = _wcs.Wcsprm()
assert w.crder.dtype == float
assert np.all(np.isnan(w.crder))
w.crder[0] = 0
assert np.isnan(w.crder[1])
assert w.crder[0] == 0
w.crder = w.crder
def test_crota():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.crota.dtype == float
assert w.has_crota() is True
assert_array_equal(w.crota, [1, 0])
del w.crota
assert w.has_crota() is False
def test_crota_missing():
w = _wcs.Wcsprm()
assert w.has_crota() is False
with pytest.raises(AttributeError):
w.crota
def test_crota_missing2():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.has_crota() is True
del w.crota
assert w.has_crota() is False
with pytest.raises(AttributeError):
w.crota
def test_crpix():
w = _wcs.Wcsprm()
assert w.crpix.dtype == float
assert_array_equal(w.crpix, [0, 0])
w.crpix = [42, 54]
assert_array_equal(w.crpix, [42, 54])
w.crpix[0] = 0
assert_array_equal(w.crpix, [0, 54])
with pytest.raises(ValueError):
w.crpix = [1, 2, 3]
def test_crval():
w = _wcs.Wcsprm()
assert w.crval.dtype == float
assert_array_equal(w.crval, [0, 0])
w.crval = [42, 54]
assert_array_equal(w.crval, [42, 54])
w.crval[0] = 0
assert_array_equal(w.crval, [0, 54])
def test_csyer():
w = _wcs.Wcsprm()
assert w.csyer.dtype == float
assert np.all(np.isnan(w.csyer))
w.csyer[0] = 0
assert np.isnan(w.csyer[1])
assert w.csyer[0] == 0
w.csyer = w.csyer
def test_ctype():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
w.ctype = [b'RA---TAN', 'DEC--TAN']
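    # Per the wcslib axis-type codes, 2200 marks a non-linear celestial
    # longitude axis and 2201 the matching latitude axis.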
assert_array_equal(w.axis_types, [2200, 2201])
assert w.lat == 1
assert w.lng == 0
assert w.lattyp == 'DEC'
assert w.lngtyp == 'RA'
assert list(w.ctype) == ['RA---TAN', 'DEC--TAN']
w.ctype = ['foo', 'bar']
assert_array_equal(w.axis_types, [0, 0])
assert list(w.ctype) == ['foo', 'bar']
assert w.lat == -1
assert w.lng == -1
assert w.lattyp == 'DEC'
assert w.lngtyp == 'RA'
def test_ctype_repr():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
w.ctype = [b'RA-\t--TAN', 'DEC-\n-TAN']
    assert repr(w.ctype) == '["RA-\t--TAN", "DEC-\n-TAN"]'
def test_ctype_index_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
for idx in (2, -3):
with pytest.raises(IndexError):
w.ctype[idx]
with pytest.raises(IndexError):
w.ctype[idx] = 'FOO'
def test_ctype_invalid_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
with pytest.raises(ValueError):
w.ctype[0] = 'X' * 100
with pytest.raises(TypeError):
w.ctype[0] = True
with pytest.raises(TypeError):
w.ctype = ['a', 0]
with pytest.raises(TypeError):
w.ctype = None
with pytest.raises(ValueError):
w.ctype = ['a', 'b', 'c']
with pytest.raises(ValueError):
w.ctype = ['FOO', 'A' * 100]
def test_cubeface():
w = _wcs.Wcsprm()
assert w.cubeface == -1
w.cubeface = 0
with pytest.raises(OverflowError):
w.cubeface = -1
def test_cunit():
w = _wcs.Wcsprm()
assert list(w.cunit) == [u.Unit(''), u.Unit('')]
w.cunit = [u.m, 'km']
assert w.cunit[0] == u.m
assert w.cunit[1] == u.km
def test_cunit_invalid():
w = _wcs.Wcsprm()
with pytest.warns(u.UnitsWarning, match='foo') as warns:
w.cunit[0] = 'foo'
assert len(warns) == 1
def test_cunit_invalid2():
w = _wcs.Wcsprm()
with pytest.warns(u.UnitsWarning) as warns:
w.cunit = ['foo', 'bar']
assert len(warns) == 2
assert 'foo' in str(warns[0].message)
assert 'bar' in str(warns[1].message)
def test_unit():
w = wcs.WCS()
w.wcs.cunit[0] = u.erg
assert w.wcs.cunit[0] == u.erg
assert repr(w.wcs.cunit) == "['erg', '']"
def test_unit2():
w = wcs.WCS()
with pytest.warns(UnitsWarning):
myunit = u.Unit("FOOBAR", parse_strict="warn")
w.wcs.cunit[0] = myunit
def test_unit3():
w = wcs.WCS()
for idx in (2, -3):
with pytest.raises(IndexError):
w.wcs.cunit[idx]
with pytest.raises(IndexError):
w.wcs.cunit[idx] = u.m
with pytest.raises(ValueError):
w.wcs.cunit = [u.m, u.m, u.m]
def test_unitfix():
w = _wcs.Wcsprm()
w.unitfix()
def test_cylfix():
# TODO: We need some data with broken cylindrical projections to
# test with. For now, this is just a smoke test.
w = _wcs.Wcsprm()
assert w.cylfix() == -1
assert w.cylfix([0, 1]) == -1
with pytest.raises(ValueError):
w.cylfix([0, 1, 2])
def test_dateavg():
w = _wcs.Wcsprm()
assert w.dateavg == ''
# TODO: When dateavg is verified, check that it works
def test_dateobs():
w = _wcs.Wcsprm()
assert w.dateobs == ''
    # TODO: When dateobs is verified, check that it works
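# datfix() rewrites an old-style dd/mm/yy DATE-OBS value into ISO-8601 form
# and derives MJD-OBS from it.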
def test_datfix():
w = _wcs.Wcsprm()
w.dateobs = '31/12/99'
assert w.datfix() == 0
assert w.dateobs == '1999-12-31'
assert w.mjdobs == 51543.0
def test_equinox():
w = _wcs.Wcsprm()
assert np.isnan(w.equinox)
w.equinox = 0
assert w.equinox == 0
del w.equinox
assert np.isnan(w.equinox)
with pytest.raises(TypeError):
w.equinox = None
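# Wcsprm.fix() runs all of the individual fixers (cdfix, datfix, unitfix,
# etc.) and returns a dict of per-fixer status messages; the expected
# messages vary with the installed wcslib version, hence the version gates
# in the tests below.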
def test_fix():
w = _wcs.Wcsprm()
    fix_ref = {
        'cdfix': 'No change',
        'cylfix': 'No change',
        'obsfix': 'No change',
        'datfix': 'No change',
        'spcfix': 'No change',
        'unitfix': 'No change',
        'celfix': 'No change'}
version = wcs._wcs.__version__
if Version(version) <= Version('5'):
del fix_ref['obsfix']
if Version(version) >= Version('7.1'):
w.dateref = '1858-11-17'
if Version('7.4') <= Version(version) < Version('7.6'):
fix_ref['datfix'] = 'Success'
assert w.fix() == fix_ref
def test_fix2():
w = _wcs.Wcsprm()
w.dateobs = '31/12/99'
fix_ref = {
'cdfix': 'No change',
'cylfix': 'No change',
'obsfix': 'No change',
'datfix': "Set MJD-OBS to 51543.000000 from DATE-OBS.\nChanged DATE-OBS from '31/12/99' to '1999-12-31'", # noqa
'spcfix': 'No change',
'unitfix': 'No change',
'celfix': 'No change'}
version = wcs._wcs.__version__
if Version(version) <= Version("5"):
del fix_ref['obsfix']
fix_ref['datfix'] = "Changed '31/12/99' to '1999-12-31'"
if Version(version) >= Version('7.3'):
fix_ref['datfix'] = "Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref['datfix']
elif Version(version) >= Version('7.1'):
fix_ref['datfix'] = "Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref['datfix']
assert w.fix() == fix_ref
assert w.dateobs == '1999-12-31'
assert w.mjdobs == 51543.0
def test_fix3():
w = _wcs.Wcsprm()
w.dateobs = '31/12/F9'
fix_ref = {
'cdfix': 'No change',
'cylfix': 'No change',
'obsfix': 'No change',
'datfix': "Invalid DATE-OBS format '31/12/F9'",
'spcfix': 'No change',
'unitfix': 'No change',
'celfix': 'No change'
}
version = wcs._wcs.__version__
if Version(version) <= Version("5"):
del fix_ref['obsfix']
fix_ref['datfix'] = "Invalid parameter value: invalid date '31/12/F9'"
if Version(version) >= Version('7.3'):
fix_ref['datfix'] = "Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref['datfix']
elif Version(version) >= Version('7.1'):
fix_ref['datfix'] = "Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref['datfix']
assert w.fix() == fix_ref
assert w.dateobs == '31/12/F9'
assert np.isnan(w.mjdobs)
def test_fix4():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.fix('X')
def test_fix5():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.fix(naxis=[0, 1, 2])
def test_get_ps():
# TODO: We need some data with PSi_ma keywords
w = _wcs.Wcsprm()
assert len(w.get_ps()) == 0
def test_get_pv():
# TODO: We need some data with PVi_ma keywords
w = _wcs.Wcsprm()
assert len(w.get_pv()) == 0
def test_imgpix_matrix():
w = _wcs.Wcsprm()
with pytest.raises(AssertionError):
w.imgpix_matrix
def test_imgpix_matrix2():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.imgpix_matrix = None
def test_isunity():
w = _wcs.Wcsprm()
    assert w.is_unity()
def test_lat():
w = _wcs.Wcsprm()
assert w.lat == -1
def test_lat_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lat = 0
def test_latpole():
w = _wcs.Wcsprm()
assert w.latpole == 90.0
w.latpole = 45.0
assert w.latpole == 45.0
del w.latpole
assert w.latpole == 90.0
def test_lattyp():
w = _wcs.Wcsprm()
assert w.lattyp == " "
def test_lattyp_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lattyp = 0
def test_lng():
w = _wcs.Wcsprm()
assert w.lng == -1
def test_lng_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lng = 0
def test_lngtyp():
w = _wcs.Wcsprm()
assert w.lngtyp == " "
def test_lngtyp_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lngtyp = 0
def test_lonpole():
w = _wcs.Wcsprm()
assert np.isnan(w.lonpole)
w.lonpole = 45.0
assert w.lonpole == 45.0
del w.lonpole
assert np.isnan(w.lonpole)
def test_mix():
w = _wcs.Wcsprm()
w.ctype = [b'RA---TAN', 'DEC--TAN']
with pytest.raises(_wcs.InvalidCoordinateError):
w.mix(1, 1, [240, 480], 1, 5, [0, 2], [54, 32], 1)
def test_mjdavg():
w = _wcs.Wcsprm()
assert np.isnan(w.mjdavg)
w.mjdavg = 45.0
assert w.mjdavg == 45.0
del w.mjdavg
assert np.isnan(w.mjdavg)
def test_mjdobs():
w = _wcs.Wcsprm()
assert np.isnan(w.mjdobs)
w.mjdobs = 45.0
assert w.mjdobs == 45.0
del w.mjdobs
assert np.isnan(w.mjdobs)
def test_name():
w = _wcs.Wcsprm()
assert w.name == ''
w.name = 'foo'
assert w.name == 'foo'
def test_naxis():
w = _wcs.Wcsprm()
assert w.naxis == 2
def test_naxis_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.naxis = 4
def test_obsgeo():
w = _wcs.Wcsprm()
assert np.all(np.isnan(w.obsgeo))
w.obsgeo = [1, 2, 3, 4, 5, 6]
assert_array_equal(w.obsgeo, [1, 2, 3, 4, 5, 6])
del w.obsgeo
assert np.all(np.isnan(w.obsgeo))
def test_pc():
w = _wcs.Wcsprm()
assert w.has_pc()
assert_array_equal(w.pc, [[1, 0], [0, 1]])
w.cd = [[1, 0], [0, 1]]
assert not w.has_pc()
del w.cd
assert w.has_pc()
assert_array_equal(w.pc, [[1, 0], [0, 1]])
w.pc = w.pc
def test_pc_missing():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert not w.has_pc()
with pytest.raises(AttributeError):
w.pc
def test_phi0():
w = _wcs.Wcsprm()
assert np.isnan(w.phi0)
w.phi0 = 42.0
assert w.phi0 == 42.0
del w.phi0
assert np.isnan(w.phi0)
def test_piximg_matrix():
w = _wcs.Wcsprm()
with pytest.raises(AssertionError):
w.piximg_matrix
def test_piximg_matrix2():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.piximg_matrix = None
def test_print_contents():
# In general, this is human-consumable, so we don't care if the
# content changes, just check the type
w = _wcs.Wcsprm()
assert isinstance(str(w), str)
def test_radesys():
w = _wcs.Wcsprm()
assert w.radesys == ''
w.radesys = 'foo'
assert w.radesys == 'foo'
def test_restfrq():
w = _wcs.Wcsprm()
assert w.restfrq == 0.0
w.restfrq = np.nan
assert np.isnan(w.restfrq)
del w.restfrq
def test_restwav():
w = _wcs.Wcsprm()
assert w.restwav == 0.0
w.restwav = np.nan
assert np.isnan(w.restwav)
del w.restwav
def test_set_ps():
w = _wcs.Wcsprm()
data = [(0, 0, "param1"), (1, 1, "param2")]
w.set_ps(data)
assert w.get_ps() == data
def test_set_ps_realloc():
w = _wcs.Wcsprm()
w.set_ps([(0, 0, "param1")] * 16)
def test_set_pv():
w = _wcs.Wcsprm()
data = [(0, 0, 42.), (1, 1, 54.)]
w.set_pv(data)
assert w.get_pv() == data
def test_set_pv_realloc():
w = _wcs.Wcsprm()
w.set_pv([(0, 0, 42.)] * 16)
def test_spcfix():
# TODO: We need some data with broken spectral headers here to
# really test
header = get_pkg_data_contents(
'data/spectra/orion-velo-1.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
assert w.spcfix() == -1
def test_spec():
w = _wcs.Wcsprm()
assert w.spec == -1
def test_spec_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.spec = 0
def test_specsys():
w = _wcs.Wcsprm()
assert w.specsys == ''
w.specsys = 'foo'
assert w.specsys == 'foo'
def test_sptr():
# TODO: Write me
pass
def test_ssysobs():
w = _wcs.Wcsprm()
assert w.ssysobs == ''
w.ssysobs = 'foo'
assert w.ssysobs == 'foo'
def test_ssyssrc():
w = _wcs.Wcsprm()
assert w.ssyssrc == ''
w.ssyssrc = 'foo'
assert w.ssyssrc == 'foo'
def test_tab():
w = _wcs.Wcsprm()
assert len(w.tab) == 0
# TODO: Inject some headers that have tables and test
def test_theta0():
w = _wcs.Wcsprm()
assert np.isnan(w.theta0)
w.theta0 = 42.0
assert w.theta0 == 42.0
del w.theta0
assert np.isnan(w.theta0)
def test_toheader():
w = _wcs.Wcsprm()
assert isinstance(w.to_header(), str)
def test_velangl():
w = _wcs.Wcsprm()
assert np.isnan(w.velangl)
w.velangl = 42.0
assert w.velangl == 42.0
del w.velangl
assert np.isnan(w.velangl)
def test_velosys():
w = _wcs.Wcsprm()
assert np.isnan(w.velosys)
w.velosys = 42.0
assert w.velosys == 42.0
del w.velosys
assert np.isnan(w.velosys)
def test_velref():
w = _wcs.Wcsprm()
assert w.velref == 0.0
w.velref = 42
assert w.velref == 42.0
del w.velref
assert w.velref == 0.0
def test_zsource():
w = _wcs.Wcsprm()
assert np.isnan(w.zsource)
w.zsource = 42.0
assert w.zsource == 42.0
del w.zsource
assert np.isnan(w.zsource)
def test_cd_3d():
header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
assert w.cd.shape == (3, 3)
assert w.get_pc().shape == (3, 3)
assert w.get_cdelt().shape == (3,)
def test_get_pc():
header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
pc = w.get_pc()
try:
pc[0, 0] = 42
except (RuntimeError, ValueError):
pass
else:
raise AssertionError()
def test_detailed_err():
w = _wcs.Wcsprm()
w.pc = [[0, 0], [0, 0]]
with pytest.raises(_wcs.SingularMatrixError):
w.set()
def test_header_parse():
with get_pkg_data_fileobj(
'data/header_newlines.fits', encoding='binary') as test_file:
hdulist = fits.open(test_file)
with pytest.warns(FITSFixedWarning):
w = wcs.WCS(hdulist[0].header)
assert w.wcs.ctype[0] == 'RA---TAN-SIP'
def test_locale():
try:
with _set_locale('fr_FR'):
header = get_pkg_data_contents('data/locale.hdr',
encoding='binary')
with pytest.warns(FITSFixedWarning):
w = _wcs.Wcsprm(header)
assert re.search("[0-9]+,[0-9]*", w.to_header()) is None
except locale.Error:
pytest.xfail(
"Can't set to 'fr_FR' locale, perhaps because it is not installed "
"on this system")
def test_unicode():
w = _wcs.Wcsprm()
with pytest.raises(UnicodeEncodeError):
w.alt = "‰"
def test_sub_segfault():
# Issue #1960
header = fits.Header.fromtextfile(
get_pkg_data_filename('data/sub-segfault.hdr'))
w = wcs.WCS(header)
w.sub([wcs.WCSSUB_CELESTIAL])
gc.collect()
def test_bounds_check():
w = _wcs.Wcsprm()
w.bounds_check(False)
def test_wcs_sub_error_message():
# Issue #1587
w = _wcs.Wcsprm()
with pytest.raises(TypeError) as e:
w.sub('latitude')
assert e.match("axes must None, a sequence or an integer$")
def test_wcs_sub():
# Issue #3356
w = _wcs.Wcsprm()
w.sub(['latitude'])
w = _wcs.Wcsprm()
w.sub([b'latitude'])
def test_compare():
header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
w2 = _wcs.Wcsprm(header)
assert w == w2
w.equinox = 42
assert w == w2
assert not w.compare(w2)
assert w.compare(w2, _wcs.WCSCOMPARE_ANCILLARY)
w = _wcs.Wcsprm(header)
w2 = _wcs.Wcsprm(header)
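    # compare() without options requires exact equality: assigning the same
    # value as float32 vs. float64 leaves the two cdelt values differing at
    # roughly the 1e-10 level, so only the tolerance-based comparison passes.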
with pytest.warns(RuntimeWarning):
w.cdelt[0] = np.float32(0.00416666666666666666666666)
w2.cdelt[0] = np.float64(0.00416666666666666666666666)
assert not w.compare(w2)
assert w.compare(w2, tolerance=1e-6)
def test_radesys_defaults():
w = _wcs.Wcsprm()
w.ctype = ['RA---TAN', 'DEC--TAN']
w.set()
assert w.radesys == "ICRS"
def test_radesys_defaults_full():
# As described in Section 3.1 of the FITS standard "Equatorial and ecliptic
# coordinates", for those systems the RADESYS keyword can be used to
# indicate the equatorial/ecliptic frame to use. From the standard:
# "For RADESYSa values of FK4 and FK4-NO-E, any stated equinox is Besselian
# and, if neither EQUINOXa nor EPOCH are given, a default of 1950.0 is to
# be taken. For FK5, any stated equinox is Julian and, if neither keyword
# is given, it defaults to 2000.0.
# "If the EQUINOXa keyword is given it should always be accompanied by
# RADESYS a. However, if it should happen to ap- pear by itself then
# RADESYSa defaults to FK4 if EQUINOXa < 1984.0, or to FK5 if EQUINOXa
# 1984.0. Note that these defaults, while probably true of older files
# using the EPOCH keyword, are not required of them.
# By default RADESYS is empty
w = _wcs.Wcsprm(naxis=2)
assert w.radesys == ''
assert np.isnan(w.equinox)
# For non-ecliptic or equatorial systems it is still empty
w = _wcs.Wcsprm(naxis=2)
for ctype in [('GLON-CAR', 'GLAT-CAR'),
('SLON-SIN', 'SLAT-SIN')]:
w.ctype = ctype
w.set()
assert w.radesys == ''
assert np.isnan(w.equinox)
for ctype in [('RA---TAN', 'DEC--TAN'),
('ELON-TAN', 'ELAT-TAN'),
('DEC--TAN', 'RA---TAN'),
('ELAT-TAN', 'ELON-TAN')]:
# Check defaults for RADESYS
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.set()
assert w.radesys == 'ICRS'
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.equinox = 1980
w.set()
assert w.radesys == 'FK4'
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.equinox = 1984
w.set()
assert w.radesys == 'FK5'
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'foo'
w.set()
assert w.radesys == 'foo'
# Check defaults for EQUINOX
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.set()
assert np.isnan(w.equinox) # frame is ICRS, no equinox
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'ICRS'
w.set()
assert np.isnan(w.equinox)
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'FK5'
w.set()
assert w.equinox == 2000.
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'FK4'
w.set()
assert w.equinox == 1950
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'FK4-NO-E'
w.set()
assert w.equinox == 1950
def test_iteration():
world = np.array(
[[-0.58995335, -0.5],
[0.00664326, -0.5],
[-0.58995335, -0.25],
[0.00664326, -0.25],
[-0.58995335, 0.],
[0.00664326, 0.],
[-0.58995335, 0.25],
[0.00664326, 0.25],
[-0.58995335, 0.5],
[0.00664326, 0.5]],
float
)
w = wcs.WCS()
w.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']
w.wcs.cdelt = [-0.006666666828, 0.006666666828]
w.wcs.crpix = [75.907, 74.8485]
x = w.wcs_world2pix(world, 1)
expected = np.array(
[[1.64400000e+02, -1.51498185e-01],
[7.49105110e+01, -1.51498185e-01],
[1.64400000e+02, 3.73485009e+01],
[7.49105110e+01, 3.73485009e+01],
[1.64400000e+02, 7.48485000e+01],
[7.49105110e+01, 7.48485000e+01],
[1.64400000e+02, 1.12348499e+02],
[7.49105110e+01, 1.12348499e+02],
[1.64400000e+02, 1.49848498e+02],
[7.49105110e+01, 1.49848498e+02]],
float)
assert_array_almost_equal(x, expected)
w2 = w.wcs_pix2world(x, 1)
world[:, 0] %= 360.
assert_array_almost_equal(w2, world)
def test_invalid_args():
with pytest.raises(TypeError):
_wcs.Wcsprm(keysel='A')
with pytest.raises(ValueError):
_wcs.Wcsprm(keysel=2)
with pytest.raises(ValueError):
_wcs.Wcsprm(colsel=2)
with pytest.raises(ValueError):
_wcs.Wcsprm(naxis=64)
header = get_pkg_data_contents(
'data/spectra/orion-velo-1.hdr', encoding='binary')
with pytest.raises(ValueError):
_wcs.Wcsprm(header, relax='FOO')
with pytest.raises(ValueError):
_wcs.Wcsprm(header, naxis=3)
with pytest.raises(KeyError):
_wcs.Wcsprm(header, key='A')
# Test keywords in the Time standard
def test_datebeg():
w = _wcs.Wcsprm()
assert w.datebeg == ''
w.datebeg = '2001-02-11'
assert w.datebeg == '2001-02-11'
w.datebeg = '31/12/99'
fix_ref = {
'cdfix': 'No change',
'cylfix': 'No change',
'obsfix': 'No change',
'datfix': "Invalid DATE-BEG format '31/12/99'",
'spcfix': 'No change',
'unitfix': 'No change',
'celfix': 'No change'}
if Version(wcs._wcs.__version__) >= Version('7.3'):
fix_ref['datfix'] = "Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref['datfix']
elif Version(wcs._wcs.__version__) >= Version('7.1'):
fix_ref['datfix'] = "Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref['datfix']
assert w.fix() == fix_ref
char_keys = ['timesys', 'trefpos', 'trefdir', 'plephem', 'timeunit',
'dateref', 'dateavg', 'dateend']
@pytest.mark.parametrize('key', char_keys)
def test_char_keys(key):
w = _wcs.Wcsprm()
assert getattr(w, key) == ''
setattr(w, key, "foo")
assert getattr(w, key) == 'foo'
with pytest.raises(TypeError):
setattr(w, key, 42)
num_keys = ['mjdobs', 'mjdbeg', 'mjdend', 'jepoch',
            'bepoch', 'tstart', 'tstop', 'xposure', 'timsyer',
            'timrder', 'timedel', 'timepixr', 'timeoffs',
            'telapse']
@pytest.mark.parametrize('key', num_keys)
def test_num_keys(key):
w = _wcs.Wcsprm()
assert np.isnan(getattr(w, key))
setattr(w, key, 42.0)
assert getattr(w, key) == 42.0
delattr(w, key)
assert np.isnan(getattr(w, key))
with pytest.raises(TypeError):
setattr(w, key, "foo")
@pytest.mark.parametrize('key', ['czphs', 'cperi', 'mjdref'])
def test_array_keys(key):
w = _wcs.Wcsprm()
attr = getattr(w, key)
if key == 'mjdref' and Version(_wcs.__version__) >= Version('7.1'):
assert np.allclose(attr, [0, 0])
else:
assert np.all(np.isnan(attr))
assert attr.dtype == float
setattr(w, key, [1., 2.])
assert_array_equal(getattr(w, key), [1., 2.])
with pytest.raises(ValueError):
setattr(w, key, ["foo", "bar"])
with pytest.raises(ValueError):
setattr(w, key, "foo")
|
89ed2a3746da4c2c99da0eb15c23e3fd48e87c93ea33d4ab590457a99c6d1a3c | import numbers
from collections import defaultdict
import numpy as np
from astropy.utils import isiterable
from astropy.utils.decorators import lazyproperty
from ..low_level_api import BaseLowLevelWCS
from .base import BaseWCSWrapper
__all__ = ['sanitize_slices', 'SlicedLowLevelWCS']
def sanitize_slices(slices, ndim):
"""
    Given a slice as input, sanitise it to an easier-to-parse format.
    This function returns a list of length ``ndim`` containing slice objects (or ints).
"""
if not isinstance(slices, (tuple, list)): # We just have a single int
slices = (slices,)
if len(slices) > ndim:
raise ValueError(
f"The dimensionality of the specified slice {slices} can not be greater "
f"than the dimensionality ({ndim}) of the wcs.")
if any(isiterable(s) for s in slices):
raise IndexError("This slice is invalid, only integer or range slices are supported.")
slices = list(slices)
if Ellipsis in slices:
if slices.count(Ellipsis) > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
# Replace the Ellipsis with the correct number of slice(None)s
e_ind = slices.index(Ellipsis)
slices.remove(Ellipsis)
n_e = ndim - len(slices)
for i in range(n_e):
ind = e_ind + i
slices.insert(ind, slice(None))
for i in range(ndim):
if i < len(slices):
slc = slices[i]
if isinstance(slc, slice):
if slc.step and slc.step != 1:
raise IndexError("Slicing WCS with a step is not supported.")
elif not isinstance(slc, numbers.Integral):
raise IndexError("Only integer or range slices are accepted.")
else:
slices.append(slice(None))
return slices
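# Illustrative behaviour of sanitize_slices (editor's sketch, not part of the
# original module):
#   sanitize_slices(0, ndim=2)             -> [0, slice(None)]
#   sanitize_slices(Ellipsis, ndim=3)      -> [slice(None)] * 3
#   sanitize_slices((Ellipsis, 5), ndim=3) -> [slice(None), slice(None), 5]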
def combine_slices(slice1, slice2):
"""
Given two slices that can be applied to a 1-d array, find the resulting
slice that corresponds to the combination of both slices. We assume that
slice2 can be an integer, but slice1 cannot.
"""
if isinstance(slice1, slice) and slice1.step is not None:
raise ValueError('Only slices with steps of 1 are supported')
if isinstance(slice2, slice) and slice2.step is not None:
raise ValueError('Only slices with steps of 1 are supported')
if isinstance(slice2, numbers.Integral):
if slice1.start is None:
return slice2
else:
return slice2 + slice1.start
if slice1.start is None:
if slice1.stop is None:
return slice2
else:
if slice2.stop is None:
return slice(slice2.start, slice1.stop)
else:
return slice(slice2.start, min(slice1.stop, slice2.stop))
else:
if slice2.start is None:
start = slice1.start
else:
start = slice1.start + slice2.start
if slice2.stop is None:
stop = slice1.stop
else:
if slice1.start is None:
stop = slice2.stop
else:
stop = slice2.stop + slice1.start
if slice1.stop is not None:
stop = min(slice1.stop, stop)
return slice(start, stop)
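# Worked examples for combine_slices (editor's sketch): applying slice2 to an
# array already sliced with slice1 is equivalent to applying the returned
# slice to the original array:
#   combine_slices(slice(2, 8), slice(1, 4)) -> slice(3, 6)   # a[2:8][1:4] == a[3:6]
#   combine_slices(slice(2, None), 3)        -> 5             # a[2:][3] == a[5]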
class SlicedLowLevelWCS(BaseWCSWrapper):
"""
A Low Level WCS wrapper which applies an array slice to a WCS.
This class does not modify the underlying WCS object and can therefore drop
coupled dimensions as it stores which pixel and world dimensions have been
sliced out (or modified) in the underlying WCS and returns the modified
results on all the Low Level WCS methods.
Parameters
----------
wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS`
The WCS to slice.
slices : `slice` or `tuple` or `int`
A valid array slice to apply to the WCS.
"""
def __init__(self, wcs, slices):
slices = sanitize_slices(slices, wcs.pixel_n_dim)
if isinstance(wcs, SlicedLowLevelWCS):
# Here we combine the current slices with the previous slices
# to avoid ending up with many nested WCSes
self._wcs = wcs._wcs
slices_original = wcs._slices_array.copy()
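            # _slices_array is stored in array (row-major) order while pixel
            # dimensions are indexed in the reversed (pixel) order, hence the
            # ``n - 1 - i`` index flips below.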
for ipixel in range(wcs.pixel_n_dim):
ipixel_orig = wcs._wcs.pixel_n_dim - 1 - wcs._pixel_keep[ipixel]
ipixel_new = wcs.pixel_n_dim - 1 - ipixel
slices_original[ipixel_orig] = combine_slices(slices_original[ipixel_orig],
slices[ipixel_new])
self._slices_array = slices_original
else:
self._wcs = wcs
self._slices_array = slices
self._slices_pixel = self._slices_array[::-1]
# figure out which pixel dimensions have been kept, then use axis correlation
# matrix to figure out which world dims are kept
self._pixel_keep = np.nonzero([not isinstance(self._slices_pixel[ip], numbers.Integral)
for ip in range(self._wcs.pixel_n_dim)])[0]
# axis_correlation_matrix[world, pixel]
self._world_keep = np.nonzero(
self._wcs.axis_correlation_matrix[:, self._pixel_keep].any(axis=1))[0]
if len(self._pixel_keep) == 0 or len(self._world_keep) == 0:
raise ValueError("Cannot slice WCS: the resulting WCS should have "
"at least one pixel and one world dimension.")
@lazyproperty
def dropped_world_dimensions(self):
"""
Information describing the dropped world dimensions.
"""
world_coords = self._pixel_to_world_values_all(*[0]*len(self._pixel_keep))
dropped_info = defaultdict(list)
for i in range(self._wcs.world_n_dim):
if i in self._world_keep:
continue
if "world_axis_object_classes" not in dropped_info:
dropped_info["world_axis_object_classes"] = dict()
wao_classes = self._wcs.world_axis_object_classes
wao_components = self._wcs.world_axis_object_components
dropped_info["value"].append(world_coords[i])
dropped_info["world_axis_names"].append(self._wcs.world_axis_names[i])
dropped_info["world_axis_physical_types"].append(self._wcs.world_axis_physical_types[i])
dropped_info["world_axis_units"].append(self._wcs.world_axis_units[i])
dropped_info["world_axis_object_components"].append(wao_components[i])
dropped_info["world_axis_object_classes"].update(dict(
filter(
lambda x: x[0] == wao_components[i][0], wao_classes.items()
)
))
dropped_info["serialized_classes"] = self.serialized_classes
return dict(dropped_info)
@property
def pixel_n_dim(self):
return len(self._pixel_keep)
@property
def world_n_dim(self):
return len(self._world_keep)
@property
def world_axis_physical_types(self):
return [self._wcs.world_axis_physical_types[i] for i in self._world_keep]
@property
def world_axis_units(self):
return [self._wcs.world_axis_units[i] for i in self._world_keep]
@property
def pixel_axis_names(self):
return [self._wcs.pixel_axis_names[i] for i in self._pixel_keep]
@property
def world_axis_names(self):
return [self._wcs.world_axis_names[i] for i in self._world_keep]
def _pixel_to_world_values_all(self, *pixel_arrays):
pixel_arrays = tuple(map(np.asanyarray, pixel_arrays))
pixel_arrays_new = []
ipix_curr = -1
for ipix in range(self._wcs.pixel_n_dim):
if isinstance(self._slices_pixel[ipix], numbers.Integral):
pixel_arrays_new.append(self._slices_pixel[ipix])
else:
ipix_curr += 1
if self._slices_pixel[ipix].start is not None:
pixel_arrays_new.append(pixel_arrays[ipix_curr] + self._slices_pixel[ipix].start)
else:
pixel_arrays_new.append(pixel_arrays[ipix_curr])
pixel_arrays_new = np.broadcast_arrays(*pixel_arrays_new)
return self._wcs.pixel_to_world_values(*pixel_arrays_new)
def pixel_to_world_values(self, *pixel_arrays):
world_arrays = self._pixel_to_world_values_all(*pixel_arrays)
        # Detect the case of a 0-d (scalar) output array
if isinstance(world_arrays, np.ndarray) and not world_arrays.shape:
return world_arrays
if self._wcs.world_n_dim > 1:
# Select the dimensions of the original WCS we are keeping.
world_arrays = [world_arrays[iw] for iw in self._world_keep]
# If there is only one world dimension (after slicing) we shouldn't return a tuple.
if self.world_n_dim == 1:
world_arrays = world_arrays[0]
return world_arrays
def world_to_pixel_values(self, *world_arrays):
world_arrays = tuple(map(np.asanyarray, world_arrays))
world_arrays_new = []
iworld_curr = -1
for iworld in range(self._wcs.world_n_dim):
if iworld in self._world_keep:
iworld_curr += 1
world_arrays_new.append(world_arrays[iworld_curr])
else:
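                # Placeholder value for a world axis that was sliced away;
                # the corresponding pixel outputs are discarded below.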
world_arrays_new.append(1.)
world_arrays_new = np.broadcast_arrays(*world_arrays_new)
pixel_arrays = list(self._wcs.world_to_pixel_values(*world_arrays_new))
for ipixel in range(self._wcs.pixel_n_dim):
if isinstance(self._slices_pixel[ipixel], slice) and self._slices_pixel[ipixel].start is not None:
pixel_arrays[ipixel] -= self._slices_pixel[ipixel].start
        # Detect the case of a 0-d (scalar) output array
if isinstance(pixel_arrays, np.ndarray) and not pixel_arrays.shape:
return pixel_arrays
pixel = tuple(pixel_arrays[ip] for ip in self._pixel_keep)
if self.pixel_n_dim == 1 and self._wcs.pixel_n_dim > 1:
pixel = pixel[0]
return pixel
@property
def world_axis_object_components(self):
return [self._wcs.world_axis_object_components[idx] for idx in self._world_keep]
@property
def world_axis_object_classes(self):
keys_keep = [item[0] for item in self.world_axis_object_components]
return dict([item for item in self._wcs.world_axis_object_classes.items() if item[0] in keys_keep])
@property
def array_shape(self):
if self._wcs.array_shape:
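            # Slice a broadcast (zero-strided) dummy array so that NumPy
            # computes the resulting shape without allocating the full array.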
return np.broadcast_to(0, self._wcs.array_shape)[tuple(self._slices_array)].shape
@property
def pixel_shape(self):
if self.array_shape:
return tuple(self.array_shape[::-1])
@property
def pixel_bounds(self):
if self._wcs.pixel_bounds is None:
return
bounds = []
for idx in self._pixel_keep:
if self._slices_pixel[idx].start is None:
bounds.append(self._wcs.pixel_bounds[idx])
else:
imin, imax = self._wcs.pixel_bounds[idx]
start = self._slices_pixel[idx].start
bounds.append((imin - start, imax - start))
return tuple(bounds)
@property
def axis_correlation_matrix(self):
return self._wcs.axis_correlation_matrix[self._world_keep][:, self._pixel_keep]
|
436046348e98f3fe038e301b923f09a05c74c7671efe8a75ffa0b74a8ca208dc | # Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)
import warnings
from packaging.version import Version
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose
from itertools import product
from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import Quantity
from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.units.core import UnitsWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcs import WCS, FITSFixedWarning, Sip, NoConvergence
from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES
from astropy.wcs._wcs import __version__ as wcsver
from astropy.utils import iers
from astropy.utils.exceptions import AstropyUserWarning
###############################################################################
# The following example is the simplest WCS with default values
###############################################################################
WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]
def test_empty():
wcs = WCS_EMPTY
# Low-level API
assert wcs.pixel_n_dim == 1
assert wcs.world_n_dim == 1
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [None]
assert wcs.world_axis_units == ['']
assert wcs.pixel_axis_names == ['']
assert wcs.world_axis_names == ['']
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [('world', 0, 'value')]
assert wcs.world_axis_object_classes['world'][0] is Quantity
assert wcs.world_axis_object_classes['world'][1] == ()
assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one
assert_allclose(wcs.pixel_to_world_values(29), 29)
assert_allclose(wcs.array_index_to_world_values(29), 29)
assert np.ndim(wcs.pixel_to_world_values(29)) == 0
assert np.ndim(wcs.array_index_to_world_values(29)) == 0
assert_allclose(wcs.world_to_pixel_values(29), 29)
assert_equal(wcs.world_to_array_index_values(29), (29,))
assert np.ndim(wcs.world_to_pixel_values(29)) == 0
assert np.ndim(wcs.world_to_array_index_values(29)) == 0
# High-level API
coord = wcs.pixel_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = wcs.array_index_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = 15 * u.one
x = wcs.world_to_pixel(coord)
assert_allclose(x, 15.)
assert np.ndim(x) == 0
i = wcs.world_to_array_index(coord)
assert_equal(i, 15)
assert np.ndim(i) == 0
###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################
HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1 = RA---TAN
CTYPE2 = DEC--TAN
CRVAL1 = 10
CRVAL2 = 20
CRPIX1 = 30
CRPIX2 = 40
CDELT1 = -0.1
CDELT2 = 0.1
CROTA2 = 0.
CUNIT1 = deg
CUNIT2 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(
HEADER_SIMPLE_CELESTIAL, sep='\n'))
def test_simple_celestial():
wcs = WCS_SIMPLE_CELESTIAL
# Low-level API
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec']
assert wcs.world_axis_units == ['deg', 'deg']
assert wcs.pixel_axis_names == ['', '']
assert wcs.world_axis_names == ['', '']
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))
assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.))
assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))
# High-level API
coord = wcs.pixel_to_world(29, 39)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = wcs.array_index_to_world(39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = SkyCoord(10, 20, unit='deg', frame='icrs')
x, y = wcs.world_to_pixel(coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
i, j = wcs.world_to_array_index(coord)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that if the coordinates are passed in a different frame things still
# work properly
coord_galactic = coord.galactic
x, y = wcs.world_to_pixel(coord_galactic)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
i, j = wcs.world_to_array_index(coord_galactic)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that we can actually index the array
data = np.arange(3600).reshape((60, 60))
coord = SkyCoord(10, 20, unit='deg', frame='icrs')
index = wcs.world_to_array_index(coord)
assert_equal(data[index], 2369)
coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs')
index = wcs.world_to_array_index(coord)
assert_equal(data[index], [2369, 3550])
###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################
HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
def test_spectral_cube():
# Spectral cube with a weird axis ordering
wcs = WCS_SPECTRAL_CUBE
# Low-level API
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True],
[False, True, False],
[True, False, True]])
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['spectral'][0] is Quantity
assert wcs.world_axis_object_classes['spectral'][1] == ()
assert wcs.world_axis_object_classes['spectral'][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
# High-level API
coord, spec = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord, spec = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord = SkyCoord(25, 10, unit='deg', frame='galactic')
spec = 20 * u.Hz
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
x, y, z = wcs.world_to_pixel(coord, spec)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
x, y, z = wcs.world_to_pixel(spec, coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
i, j, k = wcs.world_to_array_index(coord, spec)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
i, j, k = wcs.world_to_array_index(spec, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """
PC2_3 = -0.5
PC3_2 = +0.5
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring(
HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n'))
def test_spectral_cube_nonaligned():
# Make sure that correlation matrix gets adjusted if there are non-identity
# CD matrix terms.
wcs = WCS_SPECTRAL_CUBE_NONALIGNED
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']
assert_equal(wcs.axis_correlation_matrix, [[True, True, True],
[False, True, True],
[True, True, True]])
# NOTE: we check world_axis_object_components and world_axis_object_classes
# again here because in the past this failed when non-aligned axes were
# present, so this serves as a regression test.
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['spectral'][0] is Quantity
assert wcs.world_axis_object_classes['spectral'][1] == ()
assert wcs.world_axis_object_classes['spectral'][2] == {}
###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################
HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitude (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n'))
def test_time_cube():
# Spectral cube with a weird axis ordering
wcs = WCS_TIME_CUBE
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (11, 2048, 2048)
assert wcs.pixel_shape == (2048, 2048, 11)
assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time']
assert wcs.world_axis_units == ['deg', 'deg', 's']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['', '', '']
assert_equal(wcs.axis_correlation_matrix, [[True, True, False],
[True, True, False],
[False, False, True]])
components = wcs.world_axis_object_components
assert components[0] == ('celestial', 1, 'spherical.lat.degree')
assert components[1] == ('celestial', 0, 'spherical.lon.degree')
assert components[2][:2] == ('time', 0)
assert callable(components[2][2])
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['time'][0] is Time
assert wcs.world_axis_object_classes['time'][1] == ()
assert wcs.world_axis_object_classes['time'][2] == {}
assert callable(wcs.world_axis_object_classes['time'][3])
assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0),
(14.8289418840003, 2.01824372640628, 2375.341))
assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2),
(14.8289418840003, 2.01824372640628, 2375.341))
assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
(-449.2, 2955.6, 0))
assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
(0, 2956, -449))
# High-level API
coord, time = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
coord, time = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
x, y, z = wcs.world_to_pixel(coord, time)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
# Order of world coordinates shouldn't matter
x, y, z = wcs.world_to_pixel(time, coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
i, j, k = wcs.world_to_array_index(coord, time)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
i, j, k = wcs.world_to_array_index(time, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
###############################################################################
# The following tests are to make sure that Time objects are constructed
# correctly for a variety of combinations of WCS keywords
###############################################################################
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
if Version(wcsver) >= Version('7.1'):
HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n"
@pytest.fixture
def header_time_1d():
return Header.fromstring(HEADER_TIME_1D, sep='\n')
def assert_time_at(header, position, jd1, jd2, scale, format):
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(position)
assert_allclose(time.jd1, jd1, rtol=1e-10)
assert_allclose(time.jd2, jd2, rtol=1e-10)
assert time.format == format
assert time.scale == scale
@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local'))
def test_time_1d_values(header_time_1d, scale):
# Check that Time objects are instantiated with the correct values,
# scales, and formats.
header_time_1d['CTYPE1'] = scale.upper()
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd')
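# Editor's note on the expected values above: MJDREF = 50002.6 corresponds to
# JD 2450003.1 (jd1 = 2450003, jd2 = 0.1), and with CRVAL1 = 5 s, CDELT1 = 2 s
# and CRPIX1 = 1, pixel 1 (0-based) maps to 5 + (2 - 1) * 2 = 7 s past the
# reference epoch, i.e. 7 / 3600 / 24 days.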
def test_time_1d_values_gps(header_time_1d):
# Special treatment for GPS scale
header_time_1d['CTYPE1'] = 'GPS'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd')
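# The extra 19 s above is the constant offset TAI - GPS = 19 s, applied when
# the GPS scale is mapped onto TAI.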
def test_time_1d_values_deprecated(header_time_1d):
# Deprecated (in FITS) scales
header_time_1d['CTYPE1'] = 'TDT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')
header_time_1d['CTYPE1'] = 'IAT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')
header_time_1d['CTYPE1'] = 'GMT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
header_time_1d['CTYPE1'] = 'ET'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')
def test_time_1d_values_time(header_time_1d):
header_time_1d['CTYPE1'] = 'TIME'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
header_time_1d['TIMESYS'] = 'TAI'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')
@pytest.mark.remote_data
@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc'))
def test_time_1d_roundtrip(header_time_1d, scale):
# Check that coordinates round-trip
pixel_in = np.arange(3, 10)
header_time_1d['CTYPE1'] = scale.upper()
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
# Simple test
time = wcs.pixel_to_world(pixel_in)
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
# Test with an intermediate change to a different scale/format
time = wcs.pixel_to_world(pixel_in).tdb
time.format = 'isot'
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
def test_time_1d_high_precision(header_time_1d):
# Case where the MJDREF is split into two for high precision
del header_time_1d['MJDREF']
header_time_1d['MJDREFI'] = 52000.
header_time_1d['MJDREFF'] = 1e-11
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
# Here we have to use a very small rtol to really test that MJDREFF is
# taken into account
assert_allclose(time.jd1, 2452001.0, rtol=1e-12)
assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)
def test_time_1d_location_geodetic(header_time_1d):
# Make sure that the location is correctly returned (geodetic case)
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
lon, lat, alt = time.location.to_geodetic()
# FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976
# ellipsoid (https://github.com/astropy/astropy/issues/9420)
assert_allclose(lon.degree, -20)
assert_allclose(lat.degree, -70)
# assert_allclose(alt.to_value(u.m), 2530.)
@pytest.fixture
def header_time_1d_no_obs():
header = Header.fromstring(HEADER_TIME_1D, sep='\n')
del header['OBSGEO-L']
del header['OBSGEO-B']
del header['OBSGEO-H']
return header
def test_time_1d_location_geocentric(header_time_1d_no_obs):
# Make sure that the location is correctly returned (geocentric case)
header = header_time_1d_no_obs
header['OBSGEO-X'] = 10
header['OBSGEO-Y'] = -20
header['OBSGEO-Z'] = 30
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 10)
assert_allclose(y.to_value(u.m), -20)
assert_allclose(z.to_value(u.m), 30)
def test_time_1d_location_geocenter(header_time_1d_no_obs):
header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER'
wcs = WCS(header_time_1d_no_obs)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 0)
assert_allclose(y.to_value(u.m), 0)
assert_allclose(z.to_value(u.m), 0)
def test_time_1d_location_missing(header_time_1d_no_obs):
# Check what happens when no location is present
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match='Missing or incomplete observer location '
'information, setting location in Time to None'):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_incomplete(header_time_1d_no_obs):
# Check what happens when location information is incomplete
header_time_1d_no_obs['OBSGEO-L'] = 10.
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match='Missing or incomplete observer location '
'information, setting location in Time to None'):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_unsupported(header_time_1d_no_obs):
# Check what happens when TREFPOS is unsupported
header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER'
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match="Observation location 'barycenter' is not "
"supported, setting location in Time to None"):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_unsupported_ctype(header_time_1d_no_obs):
    # For cases that we don't support yet, e.g. UT(...), use Time and drop the sub-scale
header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)'
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match="Dropping unsupported sub-scale WWV from scale UT"):
time = wcs.pixel_to_world(10)
assert isinstance(time, Time)
###############################################################################
# Extra corner cases
###############################################################################
def test_unrecognized_unit():
# TODO: Determine whether the following behavior is desirable
wcs = WCS(naxis=1)
with pytest.warns(UnitsWarning):
wcs.wcs.cunit = ['bananas // sekonds']
assert wcs.world_axis_units == ['bananas // sekonds']
def test_distortion_correlations():
filename = get_pkg_data_filename('../../tests/data/sip.fits')
with pytest.warns(FITSFixedWarning):
w = WCS(filename)
assert_equal(w.axis_correlation_matrix, True)
# Changing PC to an identity matrix doesn't change anything since
# distortions are still present.
w.wcs.pc = [[1, 0], [0, 1]]
assert_equal(w.axis_correlation_matrix, True)
# Nor does changing the name of the axes to make them non-celestial
w.wcs.ctype = ['X', 'Y']
assert_equal(w.axis_correlation_matrix, True)
# However once we turn off the distortions the matrix changes
w.sip = None
assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])
# If we go back to celestial coordinates then the matrix is all True again
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_equal(w.axis_correlation_matrix, True)
# Or if we change to X/Y but have a non-identity PC
w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
w.wcs.ctype = ['X', 'Y']
assert_equal(w.axis_correlation_matrix, True)
def test_custom_ctype_to_ucd_mappings():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ['SPAM']
assert wcs.world_axis_physical_types == [None]
# Check simple behavior
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
assert wcs.world_axis_physical_types == [None]
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
# Check nesting
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
assert wcs.world_axis_physical_types == ['food.spam']
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
# Check priority in nesting
with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
assert wcs.world_axis_physical_types == ['notfood']
def test_caching_components_and_classes():
# Make sure that when we change the WCS object, the classes and components
# are updated (we use a cache internally, so we need to make sure the cache
# is invalidated if needed)
wcs = WCS_SIMPLE_CELESTIAL.deepcopy()
assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
wcs.wcs.radesys = 'FK5'
frame = wcs.world_axis_object_classes['celestial'][2]['frame']
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2000.
wcs.wcs.equinox = 2010
frame = wcs.world_axis_object_classes['celestial'][2]['frame']
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2010.
def test_sub_wcsapi_attributes():
# Regression test for a bug that caused some of the WCS attributes to be
# incorrect when using WCS.sub or WCS.celestial (which is an alias for sub
# with lon/lat types).
wcs = WCS_SPECTRAL_CUBE.deepcopy()
wcs.pixel_shape = (30, 40, 50)
wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
# Use celestial shortcut
wcs_sub1 = wcs.celestial
assert wcs_sub1.pixel_n_dim == 2
assert wcs_sub1.world_n_dim == 2
assert wcs_sub1.array_shape == (50, 30)
assert wcs_sub1.pixel_shape == (30, 50)
assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]
assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon']
assert wcs_sub1.world_axis_units == ['deg', 'deg']
assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude']
# Try adding axes
wcs_sub2 = wcs.sub([0, 2, 0])
assert wcs_sub2.pixel_n_dim == 3
assert wcs_sub2.world_n_dim == 3
assert wcs_sub2.array_shape == (None, 40, None)
assert wcs_sub2.pixel_shape == (None, 40, None)
assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]
assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None]
assert wcs_sub2.world_axis_units == ['', 'Hz', '']
assert wcs_sub2.world_axis_names == ['', 'Frequency', '']
# Use strings
wcs_sub3 = wcs.sub(['longitude', 'latitude'])
assert wcs_sub3.pixel_n_dim == 2
assert wcs_sub3.world_n_dim == 2
assert wcs_sub3.array_shape == (30, 50)
assert wcs_sub3.pixel_shape == (50, 30)
assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
assert wcs_sub3.world_axis_units == ['deg', 'deg']
assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude']
# Now try without CNAME set
wcs.wcs.cname = [''] * wcs.wcs.naxis
wcs_sub4 = wcs.sub(['longitude', 'latitude'])
assert wcs_sub4.pixel_n_dim == 2
assert wcs_sub4.world_n_dim == 2
assert wcs_sub4.array_shape == (30, 50)
assert wcs_sub4.pixel_shape == (50, 30)
assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
assert wcs_sub4.world_axis_units == ['deg', 'deg']
assert wcs_sub4.world_axis_names == ['', '']
HEADER_POLARIZED = """
CTYPE1 = 'HPLT-TAN'
CTYPE2 = 'HPLN-TAN'
CTYPE3 = 'STOKES'
"""
@pytest.fixture
def header_polarized():
return Header.fromstring(HEADER_POLARIZED, sep='\n')
def test_phys_type_polarization(header_polarized):
w = WCS(header_polarized)
assert w.world_axis_physical_types[2] == 'phys.polarization.stokes'
###############################################################################
# Spectral transformations
###############################################################################
HEADER_SPECTRAL_FRAMES = """
BUNIT = 'Jy/beam'
EQUINOX = 2.000000000E+03
CTYPE1 = 'RA---SIN'
CRVAL1 = 2.60108333333E+02
CDELT1 = -2.777777845E-04
CRPIX1 = 1.0
CUNIT1 = 'deg'
CTYPE2 = 'DEC--SIN'
CRVAL2 = -9.75000000000E-01
CDELT2 = 2.777777845E-04
CRPIX2 = 1.0
CUNIT2 = 'deg'
CTYPE3 = 'FREQ'
CRVAL3 = 1.37835117405E+09
CDELT3 = 9.765625000E+04
CRPIX3 = 32.0
CUNIT3 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_frames():
return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n')
def test_spectralcoord_frame(header_spectral_frames):
# This is a test to check the numerical results of transformations between
# different velocity frames. We simply make sure that the returned
# SpectralCoords are in the right frame but don't check the transformations
# since this is already done in test_spectralcoord_accuracy
# in astropy.coordinates.
with iers.conf.set_temp('auto_download', False):
obstime = Time("2009-05-04T04:44:23", scale='utc')
header = header_spectral_frames.copy()
header['MJD-OBS'] = obstime.mjd
header['CRVAL1'] = 16.33211
header['CRVAL2'] = -34.2221
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
# We start off with a WCS defined in topocentric frequency
with pytest.warns(FITSFixedWarning):
wcs_topo = WCS(header)
# We convert a single pixel coordinate to world coordinates and keep only
# the second high level object - a SpectralCoord:
sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]
# We check that this is in topocentric frame with zero velocities
assert isinstance(sc_topo, SpectralCoord)
assert isinstance(sc_topo.observer, ITRS)
assert sc_topo.observer.obstime.isot == obstime.isot
assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0)
observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS())
assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km
for specsys, expected_frame in VELOCITY_FRAMES.items():
header['SPECSYS'] = specsys
with pytest.warns(FITSFixedWarning):
wcs = WCS(header)
sc = wcs.pixel_to_world(0, 0, 31)[1]
# Now transform to the expected velocity frame, which should leave
# the spectral coordinate unchanged
sc_check = sc.with_observer_stationary_relative_to(expected_frame)
assert_quantity_allclose(sc.quantity, sc_check.quantity)
@pytest.mark.parametrize(('ctype3', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))
def test_different_ctypes(header_spectral_frames, ctype3, observer):
header = header_spectral_frames.copy()
header['CTYPE3'] = ctype3
header['CRVAL3'] = 0.1
header['CDELT3'] = 0.001
if ctype3[0] == 'V':
header['CUNIT3'] = 'm s-1'
else:
header['CUNIT3'] = ''
header['RESTWAV'] = 1.420405752E+09
header['MJD-OBS'] = 55197
if observer:
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
header['SPECSYS'] = 'BARYCENT'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)
assert isinstance(spectralcoord, SpectralCoord)
if observer:
pix = wcs.world_to_pixel(skycoord, spectralcoord)
else:
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
pix = wcs.world_to_pixel(skycoord, spectralcoord)
assert_allclose(pix, [0, 0, 31], rtol=1e-6, atol=1e-9)
def test_non_convergence_warning():
"""Test case for issue #11446
    Since we can't define a target accuracy when plotting a WCS, `all_world2pix`
    should not error but only warn when the default accuracy can't be reached.
"""
# define a minimal WCS where convergence fails for certain image positions
wcs = WCS(naxis=2)
crpix = [0, 0]
a = b = ap = bp = np.zeros((4, 4))
a[3, 0] = -1.20116753e-07
test_pos_x = [1000, 1]
test_pos_y = [0, 2]
wcs.sip = Sip(a, b, ap, bp, crpix)
# first make sure the WCS works when using a low accuracy
expected = wcs.all_world2pix(test_pos_x, test_pos_y, 0, tolerance=1e-3)
# then check that it fails when using the default accuracy
with pytest.raises(NoConvergence):
wcs.all_world2pix(test_pos_x, test_pos_y, 0)
    # Finally, check that world_to_pixel_values raises a warning but returns
    # the same 'low accuracy' result
with pytest.warns(UserWarning):
assert_allclose(wcs.world_to_pixel_values(test_pos_x, test_pos_y),
expected)
HEADER_SPECTRAL_1D = """
CTYPE1 = 'FREQ'
CRVAL1 = 1.37835117405E+09
CDELT1 = 9.765625000E+04
CRPIX1 = 32.0
CUNIT1 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_1d():
return Header.fromstring(HEADER_SPECTRAL_1D, sep='\n')
@pytest.mark.parametrize(('ctype1', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))
def test_spectral_1d(header_spectral_1d, ctype1, observer):
# This is a regression test for issues that happened with 1-d WCS
# where the target is not defined but observer is.
header = header_spectral_1d.copy()
header['CTYPE1'] = ctype1
header['CRVAL1'] = 0.1
header['CDELT1'] = 0.001
if ctype1[0] == 'V':
header['CUNIT1'] = 'm s-1'
else:
header['CUNIT1'] = ''
header['RESTWAV'] = 1.420405752E+09
header['MJD-OBS'] = 55197
if observer:
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
header['SPECSYS'] = 'BARYCENT'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
# First ensure that transformations round-trip
spectralcoord = wcs.pixel_to_world(31)
assert isinstance(spectralcoord, SpectralCoord)
assert spectralcoord.target is None
assert (spectralcoord.observer is not None) is observer
if observer:
expected_message = 'No target defined on SpectralCoord'
else:
expected_message = 'No observer defined on WCS'
with pytest.warns(AstropyUserWarning, match=expected_message):
pix = wcs.world_to_pixel(spectralcoord)
assert_allclose(pix, [31], rtol=1e-6)
# Also make sure that we can convert a SpectralCoord on which the observer
# is not defined but the target is.
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spectralcoord_no_obs = SpectralCoord(spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc))
if observer:
expected_message = 'No observer defined on SpectralCoord'
else:
expected_message = 'No observer defined on WCS'
with pytest.warns(AstropyUserWarning, match=expected_message):
pix2 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix2, [31], rtol=1e-6)
# And finally check case when both observer and target are defined on the
# SpectralCoord
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spectralcoord_no_obs = SpectralCoord(spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
observer=ICRS(10 * u.deg, 20 * u.deg, distance=0 * u.kpc),
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc))
if observer:
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
else:
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix3, [31], rtol=1e-6)
HEADER_SPECTRAL_WITH_TIME = """
WCSAXES = 3
CTYPE1 = 'RA---TAN'
CTYPE2 = 'DEC--TAN'
CTYPE3 = 'WAVE'
CRVAL1 = 98.83153
CRVAL2 = -66.818
CRVAL3 = 6.4205
CRPIX1 = 21.
CRPIX2 = 22.
CRPIX3 = 1.
CDELT1 = 3.6111E-05
CDELT2 = 3.6111E-05
CDELT3 = 0.001
CUNIT1 = 'deg'
CUNIT2 = 'deg'
CUNIT3 = 'um'
MJD-AVG = 59045.41466
RADESYS = 'ICRS'
SPECSYS = 'BARYCENT'
TIMESYS = 'UTC'
"""
@pytest.fixture
def header_spectral_with_time():
return Header.fromstring(HEADER_SPECTRAL_WITH_TIME, sep='\n')
def test_spectral_with_time_kw(header_spectral_with_time):
    def check_wcs(w):
assert_allclose(w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval)
sky, spec = w.pixel_to_world(*w.wcs.crpix)
assert_allclose((sky.spherical.lon.degree, sky.spherical.lat.degree, spec.value),
w.wcs.crval, rtol=1e-3)
    # Check with MJD-AVG and TIMESYS
hdr = header_spectral_with_time.copy()
with warnings.catch_warnings():
warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
# Make sure the correct keyword is used in a test
    assert not np.isnan(w.wcs.mjdavg)
assert np.isnan(w.wcs.mjdobs)
check_wcs(w)
# Check fall back to MJD-OBS
hdr['MJD-OBS'] = hdr['MJD-AVG']
del hdr['MJD-AVG']
with warnings.catch_warnings():
warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
# Make sure the correct keyword is used in a test
    assert not np.isnan(w.wcs.mjdobs)
assert np.isnan(w.wcs.mjdavg)
check_wcs(w)
    # Check fall back to DATE-OBS
hdr['DATE-OBS'] = '2020-07-15'
del hdr['MJD-OBS']
with warnings.catch_warnings():
warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
w = WCS(hdr)
w.wcs.mjdobs = np.nan
# Make sure the correct keyword is used in a test
assert np.isnan(w.wcs.mjdobs)
assert np.isnan(w.wcs.mjdavg)
assert w.wcs.dateobs != ""
    check_wcs(w)
    # Check fall back to scale='utc' (rebuild the WCS so the fallback is
    # actually exercised)
    del hdr['TIMESYS']
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
        w = WCS(hdr)
        w.wcs.mjdobs = np.nan
    check_wcs(w)
|
6c917b9ae11646e4554b59a97aea10c752fb96588d0c85c961c7f4aef33baae1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import operator
from decimal import Decimal
from datetime import timedelta
import pytest
import numpy as np
from astropy.time import (
Time, TimeDelta, OperandTypeError, ScaleValueError, TIME_SCALES,
STANDARD_TIME_SCALES, TIME_DELTA_SCALES, TimeDeltaMissingUnitWarning,
)
from astropy.utils import iers
from astropy import units as u
from astropy.table import Table
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
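# (2 ** -52 day ~= 2.2e-16 * 86400 s ~= 1.9e-11 s, i.e. the ~20 ps quoted above.)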
orig_auto_download = iers.conf.auto_download
def setup_module(module):
"""Use offline IERS table only."""
iers.conf.auto_download = False
def teardown_module(module):
"""Restore original setting."""
iers.conf.auto_download = orig_auto_download
class TestTimeDelta:
"""Test TimeDelta class"""
def setup(self):
self.t = Time('2010-01-01', scale='utc')
self.t2 = Time('2010-01-02 00:00:01', scale='utc')
self.t3 = Time('2010-01-03 01:02:03', scale='utc', precision=9,
in_subfmt='date_hms', out_subfmt='date_hm',
location=(-75. * u.degree, 30. * u.degree, 500 * u.m))
self.t4 = Time('2010-01-01', scale='local')
self.dt = TimeDelta(100.0, format='sec')
self.dt_array = TimeDelta(np.arange(100, 1000, 100), format='sec')
def test_sub(self):
# time - time
dt = self.t2 - self.t
assert (repr(dt).startswith("<TimeDelta object: scale='tai' "
"format='jd' value=1.00001157407"))
assert allclose_jd(dt.jd, 86401.0 / 86400.0)
assert allclose_sec(dt.sec, 86401.0)
# time - delta_time
t = self.t2 - dt
assert t.iso == self.t.iso
# delta_time - delta_time
dt2 = dt - self.dt
assert allclose_sec(dt2.sec, 86301.0)
# delta_time - time
with pytest.raises(OperandTypeError):
dt - self.t
def test_add(self):
# time + time
with pytest.raises(OperandTypeError):
self.t2 + self.t
# time + delta_time
dt = self.t2 - self.t
t2 = self.t + dt
assert t2.iso == self.t2.iso
# delta_time + delta_time
dt2 = dt + self.dt
assert allclose_sec(dt2.sec, 86501.0)
# delta_time + time
dt = self.t2 - self.t
t2 = dt + self.t
assert t2.iso == self.t2.iso
def test_add_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
a time is a scalar or a vector"""
t = Time(0.0, format='mjd', scale='tai')
t2 = Time([0.0, 1.0], format='mjd', scale='tai')
dt = TimeDelta(100.0, format='jd')
dt2 = TimeDelta([100.0, 200.0], format='jd')
out = t + dt
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = t + dt2
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = t2 + dt
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt + dt
assert allclose_jd(out.jd, 200.0)
assert out.isscalar
out = dt + dt2
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
# Reverse the argument order
out = dt + t
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = dt2 + t
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = dt + t2
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt2 + dt
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
def test_sub_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
a time is a scalar or a vector"""
t = Time(0.0, format='mjd', scale='tai')
t2 = Time([0.0, 1.0], format='mjd', scale='tai')
dt = TimeDelta(100.0, format='jd')
dt2 = TimeDelta([100.0, 200.0], format='jd')
out = t - dt
assert allclose_jd(out.mjd, -100.0)
assert out.isscalar
out = t - dt2
assert allclose_jd(out.mjd, [-100.0, -200.0])
assert not out.isscalar
out = t2 - dt
assert allclose_jd(out.mjd, [-100.0, -99.0])
assert not out.isscalar
out = dt - dt
assert allclose_jd(out.jd, 0.0)
assert out.isscalar
out = dt - dt2
assert allclose_jd(out.jd, [0.0, -100.0])
assert not out.isscalar
@pytest.mark.parametrize('values', [(2455197.5, 2455198.5),
([2455197.5], [2455198.5])])
def test_copy_timedelta(self, values):
"""Test copying the values of a TimeDelta object by passing it into the
Time initializer.
"""
val1, val2 = values
t = Time(val1, format='jd', scale='utc')
t2 = Time(val2, format='jd', scale='utc')
dt = t2 - t
dt2 = TimeDelta(dt, copy=False)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is dt2._time.jd1
assert dt._time.jd2 is dt2._time.jd2
dt2 = TimeDelta(dt, copy=True)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is not dt2._time.jd1
assert dt._time.jd2 is not dt2._time.jd2
# Include initializers
dt2 = TimeDelta(dt, format='sec')
assert allclose_sec(dt2.value, 86400.0)
def test_neg_abs(self):
for dt in (self.dt, self.dt_array):
dt2 = -dt
assert np.all(dt2.jd == -dt.jd)
dt3 = abs(dt)
assert np.all(dt3.jd == dt.jd)
dt4 = abs(dt2)
assert np.all(dt4.jd == dt.jd)
def test_mul_div(self):
for dt in (self.dt, self.dt_array):
dt2 = dt + dt + dt
dt3 = 3. * dt
assert allclose_jd(dt2.jd, dt3.jd)
dt4 = dt3 / 3.
assert allclose_jd(dt4.jd, dt.jd)
dt5 = self.dt * np.arange(3)
assert dt5[0].jd == 0.
assert dt5[-1].jd == (self.dt + self.dt).jd
dt6 = self.dt * [0, 1, 2]
assert np.all(dt6.jd == dt5.jd)
with pytest.raises(OperandTypeError):
self.dt * self.t
with pytest.raises(TypeError):
self.dt * object()
def test_keep_properties(self):
# closes #1924 (partially)
dt = TimeDelta(1000., format='sec')
for t in (self.t, self.t3):
ta = t + dt
assert ta.location is t.location
assert ta.precision == t.precision
assert ta.in_subfmt == t.in_subfmt
assert ta.out_subfmt == t.out_subfmt
tr = dt + t
assert tr.location is t.location
assert tr.precision == t.precision
assert tr.in_subfmt == t.in_subfmt
assert tr.out_subfmt == t.out_subfmt
ts = t - dt
assert ts.location is t.location
assert ts.precision == t.precision
assert ts.in_subfmt == t.in_subfmt
assert ts.out_subfmt == t.out_subfmt
t_tdb = self.t.tdb
assert hasattr(t_tdb, '_delta_tdb_tt')
assert not hasattr(t_tdb, '_delta_ut1_utc')
t_tdb_ut1 = t_tdb.ut1
assert hasattr(t_tdb_ut1, '_delta_tdb_tt')
assert hasattr(t_tdb_ut1, '_delta_ut1_utc')
t_tdb_ut1_utc = t_tdb_ut1.utc
assert hasattr(t_tdb_ut1_utc, '_delta_tdb_tt')
assert hasattr(t_tdb_ut1_utc, '_delta_ut1_utc')
        # adding or subtracting some time should remove the deltas,
        # since these are time-dependent and should be recalculated
for op in (operator.add, operator.sub):
t1 = op(t_tdb, dt)
assert not hasattr(t1, '_delta_tdb_tt')
assert not hasattr(t1, '_delta_ut1_utc')
t2 = op(t_tdb_ut1, dt)
assert not hasattr(t2, '_delta_tdb_tt')
assert not hasattr(t2, '_delta_ut1_utc')
t3 = op(t_tdb_ut1_utc, dt)
assert not hasattr(t3, '_delta_tdb_tt')
assert not hasattr(t3, '_delta_ut1_utc')
def test_set_format(self):
"""
Test basics of setting format attribute.
"""
dt = TimeDelta(86400.0, format='sec')
assert dt.value == 86400.0
assert dt.format == 'sec'
dt.format = 'jd'
assert dt.value == 1.0
assert dt.format == 'jd'
dt.format = 'datetime'
assert dt.value == timedelta(days=1)
assert dt.format == 'datetime'
def test_from_non_float(self):
dt = TimeDelta('1.000000000000001', format='jd')
assert dt != TimeDelta(1.000000000000001, format='jd') # precision loss.
assert dt == TimeDelta(1, .000000000000001, format='jd')
dt2 = TimeDelta(Decimal('1.000000000000001'), format='jd')
assert dt2 == dt
def test_to_value(self):
dt = TimeDelta(86400.0, format='sec')
assert dt.to_value('jd') == 1.
assert dt.to_value('jd', 'str') == '1.0'
assert dt.to_value('sec', subfmt='str') == '86400.0'
with pytest.raises(ValueError, match=("not one of the known formats.*"
"failed to parse as a unit")):
dt.to_value('julian')
with pytest.raises(TypeError, match='missing required format or unit'):
dt.to_value()
class TestTimeDeltaScales:
"""Test scale conversion for Time Delta.
Go through @taldcroft's list of expected behavior from #1932"""
def setup(self):
# pick a date that includes a leap second for better testing
self.iso_times = ['2012-06-30 12:00:00', '2012-06-30 23:59:59',
'2012-07-01 00:00:00', '2012-07-01 12:00:00']
self.t = {scale: Time(self.iso_times, scale=scale, precision=9)
for scale in TIME_SCALES}
self.dt = {scale: self.t[scale] - self.t[scale][0]
for scale in TIME_SCALES}
def test_delta_scales_definition(self):
for scale in list(TIME_DELTA_SCALES) + [None]:
TimeDelta([0., 1., 10.], format='sec', scale=scale)
with pytest.raises(ScaleValueError):
TimeDelta([0., 1., 10.], format='sec', scale='utc')
@pytest.mark.parametrize(('scale1', 'scale2'),
list(itertools.product(STANDARD_TIME_SCALES,
STANDARD_TIME_SCALES)))
def test_standard_scales_for_time_minus_time(self, scale1, scale2):
"""T(X) - T2(Y) -- does T(X) - T2(Y).X and return dT(X)
and T(X) +/- dT(Y) -- does (in essence) (T(X).Y +/- dT(Y)).X
I.e., time differences of two times should have the scale of the
first time. The one exception is UTC, which returns TAI.
There are no standard timescales for which this does not work.
"""
t1 = self.t[scale1]
t2 = self.t[scale2]
dt = t1 - t2
if scale1 in TIME_DELTA_SCALES:
assert dt.scale == scale1
else:
assert scale1 == 'utc'
assert dt.scale == 'tai'
# now check with delta time; also check reversibility
t1_recover_t2_scale = t2 + dt
assert t1_recover_t2_scale.scale == scale2
t1_recover = getattr(t1_recover_t2_scale, scale1)
assert allclose_jd(t1_recover.jd, t1.jd)
t2_recover_t1_scale = t1 - dt
assert t2_recover_t1_scale.scale == scale1
t2_recover = getattr(t2_recover_t1_scale, scale2)
assert allclose_jd(t2_recover.jd, t2.jd)
def test_local_scales_for_time_minus_time(self):
""" T1(local) - T2(local) should return dT(local)
T1(local) +/- dT(local) or T1(local) +/- Quantity(time-like) should
also return T(local)
        I.e., tests that the difference of two local-scale times returns a
        delta time with the local timescale. Furthermore, checks that
        arithmetic of T(local) with dT(None) or a time-like quantity works.
        Also tests that subtracting two Time objects, one with a local time
        scale and the other with a standard time scale, raises TypeError.
"""
t1 = self.t['local']
t2 = Time('2010-01-01', scale='local')
dt = t1 - t2
assert dt.scale == 'local'
# now check with delta time
t1_recover = t2 + dt
assert t1_recover.scale == 'local'
assert allclose_jd(t1_recover.jd, t1.jd)
# check that dT(None) can be subtracted from T(local)
dt2 = TimeDelta([10.], format='sec', scale=None)
t3 = t2 - dt2
assert t3.scale == t2.scale
# check that time quantity can be subtracted from T(local)
q = 10 * u.s
assert (t2 - q).value == (t2 - dt2).value
# Check that one cannot subtract/add times with a standard scale
# from a local one (or vice versa)
t1 = self.t['local']
for scale in STANDARD_TIME_SCALES:
t2 = self.t[scale]
with pytest.raises(TypeError):
t1 - t2
with pytest.raises(TypeError):
t2 - t1
with pytest.raises(TypeError):
t2 - dt
with pytest.raises(TypeError):
t2 + dt
with pytest.raises(TypeError):
dt + t2
def test_scales_for_delta_minus_delta(self):
"""dT(X) +/- dT2(Y) -- Add/substract JDs for dT(X) and dT(Y).X
I.e. this will succeed if dT(Y) can be converted to scale X.
Returns delta time in scale X
"""
# geocentric timescales
dt_tai = self.dt['tai']
dt_tt = self.dt['tt']
dt0 = dt_tai - dt_tt
assert dt0.scale == 'tai'
        # tai and tt run at the same rate, so the differences should be the same
assert allclose_sec(dt0.sec, 0.)
dt_tcg = self.dt['tcg']
dt1 = dt_tai - dt_tcg
assert dt1.scale == 'tai'
        # tai and tcg run at different rates, so the differences differ
assert not allclose_sec(dt1.sec, 0.)
t_tai_tcg = self.t['tai'].tcg
dt_tai_tcg = t_tai_tcg - t_tai_tcg[0]
dt2 = dt_tai - dt_tai_tcg
assert dt2.scale == 'tai'
        # but if the tcg difference is calculated from tai, it should round-trip
assert allclose_sec(dt2.sec, 0.)
# check that if we put TCG first, we get a TCG scale back
dt3 = dt_tai_tcg - dt_tai
assert dt3.scale == 'tcg'
assert allclose_sec(dt3.sec, 0.)
for scale in 'tdb', 'tcb', 'ut1':
with pytest.raises(TypeError):
dt_tai - self.dt[scale]
# barycentric timescales
dt_tcb = self.dt['tcb']
dt_tdb = self.dt['tdb']
dt4 = dt_tcb - dt_tdb
assert dt4.scale == 'tcb'
        # tcb and tdb run at different rates, so the differences differ
        assert not allclose_sec(dt4.sec, 0.)
t_tcb_tdb = self.t['tcb'].tdb
dt_tcb_tdb = t_tcb_tdb - t_tcb_tdb[0]
dt5 = dt_tcb - dt_tcb_tdb
assert dt5.scale == 'tcb'
assert allclose_sec(dt5.sec, 0.)
for scale in 'utc', 'tai', 'tt', 'tcg', 'ut1':
with pytest.raises(TypeError):
dt_tcb - self.dt[scale]
# rotational timescale
dt_ut1 = self.dt['ut1']
        dt6 = dt_ut1 - dt_ut1[-1]
        assert dt6.scale == 'ut1'
        assert dt6[-1].sec == 0.
for scale in 'utc', 'tai', 'tt', 'tcg', 'tcb', 'tdb':
with pytest.raises(TypeError):
dt_ut1 - self.dt[scale]
# local time scale
dt_local = self.dt['local']
        dt7 = dt_local - dt_local[-1]
        assert dt7.scale == 'local'
        assert dt7[-1].sec == 0.
for scale in 'utc', 'tai', 'tt', 'tcg', 'tcb', 'tdb', 'ut1':
with pytest.raises(TypeError):
dt_local - self.dt[scale]
@pytest.mark.parametrize(
('scale', 'op'), list(itertools.product(TIME_SCALES,
(operator.add, operator.sub))))
def test_scales_for_delta_scale_is_none(self, scale, op):
"""T(X) +/- dT(None) or T(X) +/- Quantity(time-like)
This is always allowed and just adds JDs, i.e., the scale of
the TimeDelta or time-like Quantity will be taken to be X.
The one exception is again for X=UTC, where TAI is assumed instead,
so that a day is always defined as 86400 seconds.
"""
dt_none = TimeDelta([0., 1., -1., 1000.], format='sec')
assert dt_none.scale is None
q_time = dt_none.to('s')
dt = self.dt[scale]
dt1 = op(dt, dt_none)
assert dt1.scale == dt.scale
assert allclose_jd(dt1.jd, op(dt.jd, dt_none.jd))
dt2 = op(dt_none, dt)
assert dt2.scale == dt.scale
assert allclose_jd(dt2.jd, op(dt_none.jd, dt.jd))
dt3 = op(q_time, dt)
assert dt3.scale == dt.scale
assert allclose_jd(dt3.jd, dt2.jd)
t = self.t[scale]
t1 = op(t, dt_none)
assert t1.scale == t.scale
assert allclose_jd(t1.jd, op(t.jd, dt_none.jd))
if op is operator.add:
t2 = op(dt_none, t)
assert t2.scale == t.scale
assert allclose_jd(t2.jd, t1.jd)
t3 = op(t, q_time)
assert t3.scale == t.scale
assert allclose_jd(t3.jd, t1.jd)
@pytest.mark.parametrize('scale', TIME_SCALES)
def test_delta_day_is_86400_seconds(self, scale):
"""TimeDelta or Quantity holding 1 day always means 24*60*60 seconds
This holds true for all timescales but UTC, for which leap-second
days are longer or shorter by one second.
"""
t = self.t[scale]
dt_day = TimeDelta(1., format='jd')
q_day = dt_day.to('day')
dt_day_leap = t[-1] - t[0]
# ^ = exclusive or, so either equal and not UTC, or not equal and UTC
assert allclose_jd(dt_day_leap.jd, dt_day.jd) ^ (scale == 'utc')
t1 = t[0] + dt_day
assert allclose_jd(t1.jd, t[-1].jd) ^ (scale == 'utc')
t2 = q_day + t[0]
assert allclose_jd(t2.jd, t[-1].jd) ^ (scale == 'utc')
t3 = t[-1] - dt_day
assert allclose_jd(t3.jd, t[0].jd) ^ (scale == 'utc')
t4 = t[-1] - q_day
assert allclose_jd(t4.jd, t[0].jd) ^ (scale == 'utc')
def test_timedelta_setitem():
t = TimeDelta([1, 2, 3] * u.d, format='jd')
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 86400 * u.s
assert allclose_jd(t.value, [1, 1, 1])
t[1] = TimeDelta(2, format='jd')
assert allclose_jd(t.value, [1, 2, 1])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert 'cannot convert value to a compatible TimeDelta' in str(err.value)
def test_timedelta_setitem_sec():
t = TimeDelta([1, 2, 3], format='sec')
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 1 * u.day
assert allclose_jd(t.value, [86400, 86400, 86400])
t[1] = TimeDelta(2, format='jd')
assert allclose_jd(t.value, [86400, 86400 * 2, 86400])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert 'cannot convert value to a compatible TimeDelta' in str(err.value)
def test_timedelta_mask():
t = TimeDelta([1, 2] * u.d, format='jd')
t[1] = np.ma.masked
assert np.all(t.mask == [False, True])
assert allclose_jd(t[0].value, 1)
assert t.value[1] is np.ma.masked
def test_python_timedelta_scalar():
td = timedelta(days=1, seconds=1)
td1 = TimeDelta(td, format='datetime')
assert td1.sec == 86401.0
td2 = TimeDelta(86401.0, format='sec')
assert td2.datetime == td
def test_python_timedelta_vector():
td = [[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)]]
td1 = TimeDelta(td, format='datetime')
assert np.all(td1.jd == [[1, 2], [3, 4]])
td2 = TimeDelta([[1, 2], [3, 4]], format='jd')
assert np.all(td2.datetime == td)
def test_timedelta_to_datetime():
td = TimeDelta(1, format='jd')
assert td.to_datetime() == timedelta(days=1)
td2 = TimeDelta([[1, 2], [3, 4]], format='jd')
td = [[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)]]
assert np.all(td2.to_datetime() == td)
def test_insert_timedelta():
tm = TimeDelta([1, 2], format='sec')
    # Insert a two-element TimeDelta at position 1
tm2 = tm.insert(1, TimeDelta([10, 20], format='sec'))
assert np.all(tm2 == TimeDelta([1, 10, 20, 2], format='sec'))
def test_no_units_warning():
with pytest.warns(TimeDeltaMissingUnitWarning):
delta = TimeDelta(1)
assert delta.to_value(u.day) == 1
with pytest.warns(TimeDeltaMissingUnitWarning):
table = Table({"t": [1, 2, 3]})
delta = TimeDelta(table["t"])
assert np.all(delta.to_value(u.day) == [1, 2, 3])
with pytest.warns(TimeDeltaMissingUnitWarning):
delta = TimeDelta(np.array([1, 2, 3]))
assert np.all(delta.to_value(u.day) == [1, 2, 3])
with pytest.warns(TimeDeltaMissingUnitWarning):
t = Time('2012-01-01') + 1
assert t.isot[:10] == '2012-01-02'
with pytest.warns(TimeDeltaMissingUnitWarning):
comp = TimeDelta([1, 2, 3], format="jd") >= 2
assert np.all(comp == [False, True, True])
with pytest.warns(TimeDeltaMissingUnitWarning):
# 2 is also interpreted as days, not seconds
assert (TimeDelta(5 * u.s) > 2) is False
# with unit is ok
assert TimeDelta(1 * u.s).to_value(u.s) == 1
# with format is also ok
assert TimeDelta(1, format="sec").to_value(u.s) == 1
assert TimeDelta(1, format="jd").to_value(u.day) == 1
# table column with units
table = Table({"t": [1, 2, 3] * u.s})
assert np.all(TimeDelta(table["t"]).to_value(u.s) == [1, 2, 3])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import copy
import functools
import datetime
from copy import deepcopy
from decimal import Decimal, localcontext
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_allclose
import erfa
from erfa import ErfaWarning
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils import isiterable, iers
from astropy.time import (Time, TimeDelta, ScaleValueError, STANDARD_TIME_SCALES,
TimeString, TimezoneInfo, TIME_FORMATS)
from astropy.coordinates import EarthLocation
from astropy import units as u
from astropy.table import Column, Table
from astropy.utils.compat.optional_deps import HAS_PYTZ, HAS_H5PY # noqa
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps * 24 * 3600)
allclose_year = functools.partial(np.allclose, rtol=np.finfo(float).eps,
                                  atol=0.)  # rtol alone gives ~14 microsec at current epoch
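# With rtol = np.finfo(float).eps these comparisons are tight: e.g.
# allclose_jd on a current-epoch JD (~2.45e6 days) tolerates roughly
# 5e-10 day, i.e. a few tens of microseconds.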
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5 + 1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5 + 0.00037179926839122024,
-0.5 + 0.00039351851851851852]))
# Get a new ``Time`` object which is referenced to the TT scale
        # (internal JD1 and JD2 are now with respect to the TT scale)
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
        # array, depending on whether the input was a scalar or array
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.) / 10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('format_', Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == 'tai'
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_precision_input(self):
"""Verifies that precision can only be 0-9 (inclusive). Any other
value should raise a ValueError exception."""
err_message = 'precision attribute must be an int'
with pytest.raises(ValueError, match=err_message):
t = Time('2010-01-01 00:00:00', format='iso', scale='utc',
precision=10)
with pytest.raises(ValueError, match=err_message):
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.precision = -1
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=7, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.5000000'
assert t.ut1.iso == '2006-01-15 21:24:37.8341000'
assert t.tai.iso == '2006-01-15 21:25:10.5000000'
assert t.tt.iso == '2006-01-15 21:25:42.6840000'
assert t.tcg.iso == '2006-01-15 21:25:43.3226905'
assert t.tdb.iso == '2006-01-15 21:25:42.6843728'
assert t.tcb.iso == '2006-01-15 21:25:56.8939523'
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.5000000'
assert t.ut1.iso == '2006-01-15 21:24:37.8341000'
assert t.tai.iso == '2006-01-15 21:25:10.5000000'
assert t.tt.iso == '2006-01-15 21:25:42.6840000'
assert t.tcg.iso == '2006-01-15 21:25:43.3226905'
assert t.tdb.iso == '2006-01-15 21:25:42.6843725'
assert t.tcb.iso == '2006-01-15 21:25:56.8939519'
# Check we get the same result
t2 = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
location=(0*u.m, 0*u.m, 0*u.m))
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'] * 2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5'] * 2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5'] * 3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp('auto_download', False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
dt64 = np.datetime64('2012-06-18T02:00:05.453000000')
Time(dt64, format='datetime64', scale='tai')
Time([dt64, dt64], format='datetime64', scale='tai')
def test_local_format_transforms(self):
"""
        Test transformation of local time to different formats.
        Transformation to formats tied to a reference epoch should raise
        ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600. / 24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600. / 24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500'
assert_allclose(t.byear, 2006.04217888831, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
        # epoch-based formats are not defined for the local scale
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2-dt) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
dt64_2 = np.datetime64('2000-01-02')
t = Time(dt64, scale='utc', precision=9, format='datetime64')
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64
t = Time(dt64_2, scale='utc', precision=3, format='datetime64')
assert t.iso == '2000-01-02 00:00:00.000'
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale='utc', format='datetime64')
assert np.all(t.value == [dt64, dt64_2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime64 == np.datetime64('2000-01-01T01:01:01.123456789')
# broadcasting
dt3 = (dt64 + (dt64_2-dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc', format='datetime64')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format='datetime64')
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format='datetime64'))
assert Time(t3[2, 0], format='datetime64') == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
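        # A leap-second UTC day is 86401 SI seconds long, so '23:59:60'
        # parses as a valid time on such a day.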
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f'{year:04d}-{month:02d}'
yyyy_mm_dd = f'{year:04d}-{month:02d}-{day:02d}'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = f'{year:04d}-07-01'
else:
yyyy_mm_dd_plus1 = f'{year + 1:04d}-01-01'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale='local')
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize("d", [
dict(val="2001:001", val2="ignored", scale="utc"),
dict(val={'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
val2="ignored", scale="utc"),
dict(val=np.datetime64('2005-02-25'), val2="ignored", scale="utc"),
dict(val=datetime.datetime(2000, 1, 2, 12, 0, 0),
val2="ignored", scale="utc"),
])
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
def test_broadcast_not_writable(self):
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = (2458000 + np.arange(3))
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000',
'2000-01-01T01:01:01.000',
'2000-01-01T01:01:01.123']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+02000-01-01T01:01:01.123']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'-00594-01-01T00:00:00.000']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+10594-01-01T00:00:00.000']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc")):
with pytest.warns(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][:inputs[0].index("(")], format="isot",
scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:00.123456789(UTC)')
t = t.tai
assert t.isot == '1999-01-01T00:00:32.123'
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)')
t = t.utc
assert t.isot == '1999-01-01T00:00:00.123'
# Check scale consistency
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="tai")
assert t.scale == "tai"
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(ET)', scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'tt'
t = Time('J2000')
assert t.scale == 'tt'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
        # Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
def test_plot_date(self):
"""Test the plot_date format.
Depending on the situation with matplotlib, this can give different
results because the plot date epoch time changed in matplotlib 3.3. This
test tries to use the matplotlib date2num function to make the test
independent of version, but if matplotlib isn't available then the code
(and test) use the pre-3.3 epoch.
"""
try:
from matplotlib.dates import date2num
except ImportError:
# No matplotlib, in which case this uses the epoch 0000-12-31
# as per matplotlib < 3.3.
# Value from:
# matplotlib.dates.set_epoch('0000-12-31')
# val = matplotlib.dates.date2num('2000-01-01')
val = 730120.0
else:
val = date2num(datetime.datetime(2000, 1, 1))
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time('54321.000000000001', format='mjd')
assert t == Time(54321, 1e-12, format='mjd')
assert t.mjd == 54321. # Lost precision!
assert t.value == 54321. # Lost precision!
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', 'bytes') == b'54321.000000000001'
expected_long = np.longdouble(54321.) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(t.to_value('mjd', subfmt='long'),
expected_long, rtol=0, atol=np.finfo(float).eps)
t.out_subfmt = 'str'
assert t.value == '54321.000000000001'
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.mjd == '54321.000000000001'
assert t.to_value('mjd', subfmt='bytes') == b'54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
t.out_subfmt = 'long'
assert np.allclose(t.value, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.to_value('mjd', subfmt=None), expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.mjd, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format='mjd')
expected = Time(i, f, format='mjd')
assert abs(t - expected) <= 20. * u.ps
t_float = Time(i + f, format='mjd')
assert t_float == Time(i, format='mjd')
assert t_float != t
assert t.value == 54321. # Lost precision!
assert np.allclose(t.to_value('mjd', subfmt='long'), mjd_long,
rtol=0., atol=np.finfo(float).eps)
t2 = Time(mjd_long, format='mjd', out_subfmt='long')
assert np.allclose(t2.value, mjd_long,
rtol=0., atol=np.finfo(float).eps)
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
t1 = Time(i, f, format='mjd')
t2 = Time(np.longdouble(i), f, format='mjd')
t3 = Time(i, np.longdouble(f), format='mjd')
t4 = Time(np.longdouble(i), np.longdouble(f), format='mjd')
assert t1 == t2 == t3 == t4
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1. if fmt == 'mjd' else 24. * 3600.)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt='long')
assert np.allclose(tm_long2, t_fmt_long2, rtol=0., atol=atol)
def test_subformat_input(self):
s = '54321.01234567890123456789'
i, f = s.split('.') # Note, OK only for fraction < 0.5
t = Time(float(i), float('.' + f), format='mjd')
t_str = Time(s, format='mjd')
t_bytes = Time(s.encode('ascii'), format='mjd')
t_decimal = Time(Decimal(s), format='mjd')
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize('out_subfmt', ('str', 'bytes'))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0., 1e-9, 1e-12])
t = Time(i, f, format='mjd', out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(['54321.0',
'54321.000000001',
'54321.000000000001'], dtype=out_subfmt)
assert np.all(t_value == expected)
assert np.all(Time(expected, format='mjd') == t)
# Explicit sub-format.
t = Time(i, f, format='mjd')
t_mjd_subfmt = t.to_value('mjd', subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize('fmt,string,val1,val2', [
('jd', '2451544.5333981', 2451544.5, .0333981),
('decimalyear', '2000.54321', 2000., .54321),
('cxcsec', '100.0123456', 100.0123456, None),
('unix', '100.0123456', 100.0123456, None),
('gps', '100.0123456', 100.0123456, None),
('byear', '1950.1', 1950.1, None),
('jyear', '2000.1', 2000.1, None)])
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt='str') == string
def test_basic_subformat_setting(self):
t = Time('2001', format='jyear', scale='tai')
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time('2001', format='jyear', scale='tai')
t.to_value('mjd', subfmt='str')
assert ('mjd', 'str') in t.cache['format']
t.to_value('mjd', 'str')
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time('2001', format='jyear', scale='tai')
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time('2001', format='jyear', scale='tai')
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
t2_s_40 = t.to_value(fmt, "str")
assert t_s_2 == t2_s_40, "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
t = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value('mjd', subfmt='decimal')
t2 = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value('mjd', subfmt='decimal')
t2_s_40 = t2.to_value('mjd', subfmt='decimal')
        assert t_s_2 == t_s_40, "Decimal context should not affect the cached value"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize("f, s, t", [("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str)])
def test_timedelta_basic(self, f, s, t):
dt = (Time("58000", format="mjd", scale="tai")
- Time("58001", format="mjd", scale="tai"))
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time('J2000')
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match='format must be one of'):
t.to_value('julian')
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match='not among selected'):
Time("58000", format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(np.longdouble(58000), format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='str')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='long')
def test_wrong_subfmt(self):
t = Time(58000., format='mjd')
with pytest.raises(ValueError, match='must match one'):
t.to_value('mjd', subfmt='parrot')
with pytest.raises(ValueError, match='must match one'):
t.out_subfmt = 'parrot'
with pytest.raises(ValueError, match='must match one'):
t.in_subfmt = 'parrot'
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time('J2000')
match = 'subformat not allowed for format jyear_str'
with pytest.raises(ValueError, match=match):
t.to_value('jyear_str', subfmt='parrot')
with pytest.raises(ValueError, match=match):
t.out_subfmt = 'parrot'
with pytest.raises(ValueError, match=match):
Time('J2000', out_subfmt='parrot')
with pytest.raises(ValueError, match=match):
t.in_subfmt = 'parrot'
with pytest.raises(ValueError, match=match):
Time('J2000', format='jyear_str', in_subfmt='parrot')
def test_switch_to_format_with_no_out_subfmt(self):
t = Time('2001-01-01', out_subfmt='date_hm')
assert t.out_subfmt == 'date_hm'
        # Now do an in-place switch to format 'jyear_str', which has no
        # subfmts; out_subfmt should then be reset to '*'.
t.format = 'jyear_str'
assert t.out_subfmt == '*'
assert t.value == 'J2001.001'
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r'bad day \(JD computed\)') as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
        assert t.location.x == t_loc_x # prove that it did not change
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time('2320-01-01', scale='tai').stardate)[:7] == '1368.99'
assert str(Time('2330-01-01', scale='tai').stardate)[:8] == '10552.76'
assert str(Time('2340-01-01', scale='tai').stardate)[:8] == '19734.02'
@pytest.mark.parametrize('dates',
[(10000, '2329-05-26 03:02'),
(20000, '2340-04-15 19:05'),
(30000, '2351-03-07 11:08')])
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format='stardate')
t_iso = Time(t_star, format='iso', out_subfmt='date_hm')
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
def test_fits_year0():
t = Time(1721425.5, format='jd', scale='tai')
assert t.fits == '0001-01-01T00:00:00.000'
t = Time(1721425.5 - 366., format='jd', scale='tai')
assert t.fits == '+00000-01-01T00:00:00.000'
t = Time(1721425.5 - 366. - 365., format='jd', scale='tai')
assert t.fits == '-00001-01-01T00:00:00.000'
def test_fits_year10000():
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000'
t = Time(5373484.5, -1. / 24. / 3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_time_from_epoch_jds():
"""Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
jd1 is an integral value and abs(jd2) <= 0.5.
"""
# From 1999:001 00:00 to 1999:002 12:00 by a non-round step. This will
# catch jd2 == 0 and a case of abs(jd2) == 0.5.
cxcsecs = np.linspace(0, 86400 * 1.5, 49)
for cxcsec in cxcsecs:
t = Time(cxcsec, format='cxcsec')
assert np.round(t.jd1) == t.jd1
assert np.abs(t.jd2) <= 0.5
t = Time(cxcsecs, format='cxcsec')
assert np.all(np.round(t.jd1) == t.jd1)
assert np.all(np.abs(t.jd2) <= 0.5)
assert np.any(np.abs(t.jd2) == 0.5) # At least one exactly 0.5
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
# Check that if we have internet, and downloading is allowed, we
# can get conversion to UT1 for the present, since we will download
# IERS_A in IERS_Auto.
monkeypatch.setattr('astropy.utils.iers.conf.auto_download', True)
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
t = Time('2020-01-01', scale='utc')
assert allclose_sec(t.unix_tai - t.unix, 37.0)
t = Time('1970-01-01', scale='utc')
assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname='US/Hawaii')
    # The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError, match=r'does not support leap seconds'):
Time('2015-06-30 23:59:60.000').to_datetime()
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
import pytz
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[[f'{y:04d}-{m:02d}-{d:02d}' for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time('2000:001', scale='utc')
t[()] = '2000:002'
assert t.value.startswith('2000:002')
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location=None'.format(loc[0])) in str(err.value)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location={}'.format(loc[0], loc[1])) in str(err.value)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location={}'.format(loc[1])) in str(err.value)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
    # Ref: issue gh-7449 and PR gh-7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strftime_array():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S').tolist() == tstrings
def test_strftime_array_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime('%Y-%m-%d %H:%M:%S') == tstrings)
assert t.strftime('%Y-%m-%d %H:%M:%S').shape == tstrings.shape
def test_strftime_leapsecond():
time_string = '1995-12-31 23:59:60'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strptime_scalar():
"""Test of Time.strptime
"""
time_string = '2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01', '1998-Jan-01 00:00:02'],
['1998-Jan-01 00:00:03', '1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, '%S')
def test_strptime_input_bytes_scalar():
time_string = b'2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [[b'1998-Jan-01 00:00:01', b'1998-Jan-01 00:00:02'],
[b'1998-Jan-01 00:00:03', b'1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time('1995-12-31T23:59:60', format='isot')
time_obj2 = Time.strptime('1995-Dec-31 23:59:60', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time('0995-12-31T00:00:00', format='isot', scale='tai')
time_obj2 = Time.strptime('0995-Dec-31 00:00:00', '%Y-%b-%d %H:%M:%S',
scale='tai')
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = '2007-May-04 21:08:12.123'
time_object = Time('2007-05-04 21:08:12.123')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S.%f')
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01.123', '1998-Jan-01 00:00:02.000001'],
['1998-Jan-01 00:00:03.000900', '1998-Jan-01 00:00:04.123456']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01.123', '1998-01-01 00:00:02.000001'],
['1998-01-01 00:00:03.000900', '1998-01-01 00:00:04.123456']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S.%f')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00.123'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == time_string
def test_strftime_scalar_fracsec_precision():
time_string = '2010-09-03 06:00:00.123123123'
t = Time(time_string)
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123'
t.precision = 9
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123123123'
def test_strftime_array_fracsec():
tstrings = ['2010-09-03 00:00:00.123000', '2005-09-03 06:00:00.000001',
'1995-12-31 23:59:60.000900']
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f').tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format='unix')
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, '1970-01-01 00:01:00')
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time('1970-01-01 00:01:00'))
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time('1970-01-01 00:01:00')])
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format='unix'))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format='unix'))
def test_insert_exceptions():
tm = Time(1, format='unix')
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert 'cannot insert into scalar' in str(err.value)
tm = Time([1, 2], format='unix')
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert 'axis must be 0' in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert 'obj arg must be an integer' in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert 'index -100 is out of bounds for axis 0 with size 2' in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
t = Time(dt64, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format='cxcsec', location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format='cxcsec', location=loc)
t2 = Time(1, format='cxcsec')
assert hash(t) != hash(t2)
t = Time('2000:180', scale='utc')
t2 = Time(t, scale='tai')
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format='sec')
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time('2000:001', format='not-a-format')
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time('200')
assert 'Input values did not match any of the formats where' in str(err.value)
with pytest.raises(ValueError) as err:
Time('200', format='iso')
assert ('Input values did not match the format class iso:' + os.linesep
+ 'ValueError: Time 200 does not match iso format') == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format='iso')
assert ('Input values did not match the format class iso:' + os.linesep
+ 'TypeError: Input values for iso class must be strings') == str(err.value)
def test_ymdhms_defaults():
t1 = Time({'year': 2001}, format='ymdhms')
assert t1 == Time('2001-01-01')
times_dict_ns = {
'year': [2001, 2002],
'month': [2, 3],
'day': [4, 5],
'hour': [6, 7],
'minute': [8, 9],
'second': [10, 11]
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ('year', 'month', 'day', 'hour', 'minute', 'second')
@pytest.mark.parametrize('tm_input', [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
@pytest.mark.parametrize('as_row', [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(['2001-02-04 06:08:10', '2002-03-05 07:09:11'])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {
'year': [[2001, 2002],
[2003, 2004]],
'month': [2, 3],
'day': 4
}
time_shape = Time(
[['2001-02-04', '2002-03-04'],
['2003-02-04', '2004-03-04']]
)
time = Time(times_dict_shape, format='ymdhms')
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
'year': 2016,
'month': 12,
'day': 31,
'hour': 23,
'minute': 59,
'second': 60.123456789}
tm = Time(time_dict, **kwargs)
assert tm == Time('2016-12-31T23:59:60.123456789')
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == 'second':
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match='input must be dict or table-like'):
Time(10, format='ymdhms')
match = "'wrong' not allowed as YMDHMS key name(s)"
    # NB: using match=match in pytest.raises() fails because the parentheses
    # in the message are regex metacharacters, so we fall back to old school
    # ``match in str(err.value)``.
with pytest.raises(ValueError) as err:
Time({'year': 2019, 'wrong': 1}, format='ymdhms')
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({'year': 2019, 'minute': 1}, format='ymdhms')
def test_ymdhms_masked():
tm = Time({'year': [2000, 2001]}, format='ymdhms')
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time({'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
scale='utc')
# NOTE: actually comes back as np.void for some reason
# NOTE: not necessarily a python int; might be an int32
assert t.ymdhms.year == 2015
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_ecsv(fmt):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t['a'].format == t2['a'].format
# Some loss of precision in the serialization
assert not np.all(t['a'] == t2['a'])
# But no loss in the format representation
assert np.all(t['a'].value == t2['a'].value)
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_fits(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = tmp_path / 'out.fits'
t.write(out, format='fits')
t2 = Table.read(out, format='fits', astropy_native=True)
# Currently the format is lost in FITS so set it back
t2['a'].format = fmt
# No loss of precision in the serialization or representation
assert np.all(t['a'] == t2['a'])
assert np.all(t['a'].value == t2['a'].value)
@pytest.mark.skipif(not HAS_H5PY, reason='Needs h5py')
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_hdf5(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = tmp_path / 'out.h5'
t.write(str(out), format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(str(out), format='hdf5', path='root')
assert t['a'].format == t2['a'].format
# No loss of precision in the serialization or representation
assert np.all(t['a'] == t2['a'])
assert np.all(t['a'].value == t2['a'].value)
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object (or at
# least, the format object is constructed with "from_jd=True"). In that case
# the normal input validation does not happen, but the new input validation
# does, and it ensures that strange broadcasting anomalies cannot occur.
# This form of construction uses from_jd=True.
def test_broadcasting_writeable():
t = Time('J2015') + np.linspace(-1, 1, 10) * u.day
t[2] = Time(58000, format="mjd")
def test_format_subformat_compatibility():
"""Test that changing format with out_subfmt defined is not a problem.
See #9812, #9810."""
t = Time('2019-12-20', out_subfmt='date_??')
assert t.mjd == 58837.0
assert t.yday == '2019:354:00:00' # Preserves out_subfmt
t2 = t.replicate(format='mjd')
assert t2.out_subfmt == '*' # Changes to default
t2 = t.copy(format='mjd')
assert t2.out_subfmt == '*'
t2 = Time(t, format='mjd')
assert t2.out_subfmt == '*'
t2 = t.copy(format='yday')
assert t2.out_subfmt == 'date_??'
assert t2.value == '2019:354:00:00'
t.format = 'yday'
assert t.value == '2019:354:00:00'
assert t.out_subfmt == 'date_??'
t = Time('2019-12-20', out_subfmt='date')
assert t.mjd == 58837.0
assert t.yday == '2019:354'
@pytest.mark.parametrize('fmt_name,fmt_class', TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
"""From a starting Time value, test that every valid combination of
to_value(format, subfmt) works. See #9812, #9361.
"""
t = Time('2000-01-01')
subfmts = list(subfmt[0] for subfmt in fmt_class.subfmts) + [None, '*']
for subfmt in subfmts:
t.to_value(fmt_name, subfmt)
@pytest.mark.parametrize('location', [None, (45, 45)])
def test_location_init(location):
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances.
"""
tm = Time('J2010', location=location)
# Init from a scalar Time
tm2 = Time(tm)
assert np.all(tm.location == tm2.location)
assert type(tm.location) is type(tm2.location) # noqa
# From a list of Times
tm2 = Time([tm, tm])
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa
# Effectively the same as a list of Times, but just to be sure that
    # Table mixin initialization is working as expected.
tm2 = Table([[tm, tm]])['col0']
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa
def test_location_init_fail():
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances. Make sure exception is correct.
"""
tm = Time('J2010', location=(45, 45))
tm2 = Time('J2010')
with pytest.raises(ValueError,
match='cannot concatenate times unless all locations'):
Time([tm, tm2])
def test_linspace():
"""Test `np.linspace` `__array_func__` implementation for scalar and arrays.
"""
t1 = Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00'])
t2 = Time(['2021-01-01 01:00:00', '2021-12-28 00:00:00'])
atol = 1 * u.ps
ts = np.linspace(t1[0], t2[0], 3)
assert ts[0].isclose(Time('2021-01-01 00:00:00'), atol=atol)
assert ts[1].isclose(Time('2021-01-01 00:30:00'), atol=atol)
assert ts[2].isclose(Time('2021-01-01 01:00:00'), atol=atol)
ts = np.linspace(t1, t2[0], 2, endpoint=False)
assert ts.shape == (2, 2)
assert all(ts[0].isclose(Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2021-01-01 00:30:00', '2021-01-01 12:30:00']), atol=atol*10))
ts = np.linspace(t1, t2, 7)
assert ts.shape == (7, 2)
assert all(ts[0].isclose(Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2021-01-01 00:10:00', '2021-03-03 00:00:00']), atol=atol*300))
assert all(ts[5].isclose(Time(['2021-01-01 00:50:00', '2021-10-29 00:00:00']), atol=atol*3000))
assert all(ts[6].isclose(Time(['2021-01-01 01:00:00', '2021-12-28 00:00:00']), atol=atol))
def test_linspace_steps():
"""Test `np.linspace` `retstep` option.
"""
t1 = Time(['2021-01-01 00:00:00', '2021-01-01 12:00:00'])
t2 = Time('2021-01-02 00:00:00')
atol = 1 * u.ps
ts, st = np.linspace(t1, t2, 7, retstep=True)
assert ts.shape == (7, 2)
assert st.shape == (2,)
assert all(ts[1].isclose(ts[0] + st, atol=atol))
assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))
assert all(st.isclose(TimeDelta([14400, 7200], format='sec'), atol=atol))
def test_linspace_fmts():
"""Test `np.linspace` `__array_func__` implementation for start/endpoints
from different formats/systems.
"""
t1 = Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00'])
t2 = Time(2458850, format='jd')
t3 = Time(1578009600, format='unix')
atol = 1 * u.ps
ts = np.linspace(t1, t2, 3)
assert ts.shape == (3, 2)
assert all(ts[0].isclose(Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2020-01-01 06:00:00', '2020-01-01 18:00:00']), atol=atol))
assert all(ts[2].isclose(Time(['2020-01-01 12:00:00', '2020-01-01 12:00:00']), atol=atol))
ts = np.linspace(t1, Time([t2, t3]), 3)
assert ts.shape == (3, 2)
assert all(ts[0].isclose(Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2020-01-01 06:00:00', '2020-01-02 12:00:00']), atol=atol))
assert all(ts[2].isclose(Time(['2020-01-01 12:00:00', '2020-01-03 00:00:00']), atol=atol))
|
f3f1bb6fd1bfb14f04faebe2b49f5ce0a34fa988b11864208d2fafaa404f7494 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import pytest
import numpy as np
import erfa
from astropy import units as u
from astropy.time import Time
from astropy.time.core import SIDEREAL_TIME_MODELS
from astropy.utils import iers
allclose_hours = functools.partial(np.allclose, rtol=1e-15, atol=3e-8)
# 0.1 ms atol; IERS-B files change at that level.
within_1_second = functools.partial(np.allclose, rtol=1., atol=1. / 3600.)
within_2_seconds = functools.partial(np.allclose, rtol=1., atol=2. / 3600.)
def test_doc_string_contains_models():
"""The doc string is formatted; this ensures this remains working."""
for kind in ('mean', 'apparent'):
for model in SIDEREAL_TIME_MODELS[kind]:
assert model in Time.sidereal_time.__doc__
class TestERFATestCases:
"""Test that we reproduce the test cases given in erfa/src/t_erfa_c.c"""
    @classmethod
    def setup_class(cls):
# Sidereal time tests use the following JD inputs.
cls.time_ut1 = Time(2400000.5, 53736.0, scale='ut1', format='jd')
cls.time_tt = Time(2400000.5, 53736.0, scale='tt', format='jd')
        # but tt != ut1 at these dates, contrary to what the ERFA test cases
        # assume, so we cannot reproduce them exactly. That no longer really
        # matters, but we may as well fake it (and avoid IERS table lookup here).
cls.time_ut1.delta_ut1_utc = 0.
cls.time_ut1.delta_ut1_utc = 24 * 3600 * (
(cls.time_ut1.tt.jd1 - cls.time_tt.jd1)
+ (cls.time_ut1.tt.jd2 - cls.time_tt.jd2))
def test_setup(self):
assert np.allclose((self.time_ut1.tt.jd1 - self.time_tt.jd1)
+ (self.time_ut1.tt.jd2 - self.time_tt.jd2),
0., atol=1.e-14)
@pytest.mark.parametrize('erfa_test_input',
((1.754174972210740592, 1e-12, "eraGmst00"),
(1.754174971870091203, 1e-12, "eraGmst06"),
(1.754174981860675096, 1e-12, "eraGmst82"),
(1.754166138018281369, 1e-12, "eraGst00a"),
(1.754166136510680589, 1e-12, "eraGst00b"),
(1.754166137675019159, 1e-12, "eraGst06a"),
(1.754166136020645203, 1e-12, "eraGst94")))
def test_iau_models(self, erfa_test_input):
result, precision, name = erfa_test_input
if name[4] == 'm':
kind = 'mean'
model_name = f"IAU{20 if name[7] == '0' else 19:2d}{name[7:]:s}"
else:
kind = 'apparent'
model_name = f"IAU{20 if name[6] == '0' else 19:2d}{name[6:].upper():s}"
assert kind in SIDEREAL_TIME_MODELS.keys()
assert model_name in SIDEREAL_TIME_MODELS[kind]
gst = self.time_ut1.sidereal_time(kind, 'greenwich', model_name)
assert np.allclose(gst.to_value('radian'), result,
rtol=1., atol=precision)
def test_era(self):
# Separate since it does not use the same time.
time_ut1 = Time(2400000.5, 54388.0, format='jd', scale='ut1')
era = time_ut1.earth_rotation_angle('tio')
expected = 0.4022837240028158102
assert np.abs(era.to_value(u.radian) - expected) < 1e-12
class TestST:
"""Test Greenwich Sidereal Time. Unlike above, this is relative to
what was found earlier, so checks changes in implementation, including
leap seconds, rather than correctness"""
@classmethod
def setup_class(cls):
cls.orig_auto_download = iers.conf.auto_download
iers.conf.auto_download = False
cls.t1 = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59',
'2012-06-30 23:59:60', '2012-07-01 00:00:00',
'2012-07-01 12:00:00'], scale='utc')
cls.t2 = Time(cls.t1, location=('120d', '10d'))
@classmethod
def teardown_class(cls):
iers.conf.auto_download = cls.orig_auto_download
def test_gmst(self):
"""Compare Greenwich Mean Sidereal Time with what was found earlier
"""
gmst_compare = np.array([6.5968497894730564, 18.629426164144697,
18.629704702452862, 18.629983240761003,
6.6628381828899643])
gmst = self.t1.sidereal_time('mean', 'greenwich')
assert allclose_hours(gmst.value, gmst_compare)
def test_gst(self):
"""Compare Greenwich Apparent Sidereal Time with what was found earlier
"""
gst_compare = np.array([6.5971168570494854, 18.629694220878296,
18.62997275921186, 18.630251297545389,
6.6631074284018244])
gst = self.t1.sidereal_time('apparent', 'greenwich')
assert allclose_hours(gst.value, gst_compare)
def test_era(self):
"""Comare ERA relative to erfa.era00 test case."""
t = Time(2400000.5, 54388.0, format='jd', location=(0, 0), scale='ut1')
era = t.earth_rotation_angle()
expected = 0.4022837240028158102 * u.radian
# Without the TIO locator/polar motion, this should be close already.
assert np.abs(era - expected) < 1e-10 * u.radian
# And with it, one should reach full precision.
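        # (erfa.sp00 returns the TIO locator s'; the rx/ry/rz chain below
        # builds the polar-motion rotation, from which the correction to the
        # ERA is read off the resulting matrix.)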
sp = erfa.sp00(t.tt.jd1, t.tt.jd2)
iers_table = iers.earth_orientation_table.get()
xp, yp = (c.to_value(u.rad) for c in iers_table.pm_xy(t))
r = erfa.rx(-yp, erfa.ry(-xp, erfa.rz(sp, np.eye(3))))
expected1 = expected + (np.arctan2(r[0, 1], r[0, 0]) << u.radian)
assert np.abs(era - expected1) < 1e-12 * u.radian
# Now try at a longitude different from 0.
t2 = Time(2400000.5, 54388.0, format='jd', location=(45, 0), scale='ut1')
era2 = t2.earth_rotation_angle()
r2 = erfa.rz(np.deg2rad(45), r)
expected2 = expected + (np.arctan2(r2[0, 1], r2[0, 0]) << u.radian)
assert np.abs(era2 - expected2) < 1e-12 * u.radian
def test_gmst_gst_close(self):
"""Check that Mean and Apparent are within a few seconds."""
gmst = self.t1.sidereal_time('mean', 'greenwich')
gst = self.t1.sidereal_time('apparent', 'greenwich')
assert within_2_seconds(gst.value, gmst.value)
def test_gmst_era_close(self):
"""Check that mean sidereal time and earth rotation angle are close."""
gmst = self.t1.sidereal_time('mean', 'greenwich')
era = self.t1.earth_rotation_angle('tio')
assert within_2_seconds(era.value, gmst.value)
def test_gmst_independent_of_self_location(self):
"""Check that Greenwich time does not depend on self.location"""
gmst1 = self.t1.sidereal_time('mean', 'greenwich')
gmst2 = self.t2.sidereal_time('mean', 'greenwich')
assert allclose_hours(gmst1.value, gmst2.value)
def test_gmst_vs_lmst(self):
"""Check that Greenwich and local sidereal time differ."""
gmst = self.t1.sidereal_time('mean', 'greenwich')
lmst = self.t1.sidereal_time('mean', 0)
assert allclose_hours(lmst.value, gmst.value)
assert np.all(np.abs(lmst-gmst) > 1e-10 * u.hourangle)
@pytest.mark.parametrize('kind', ('mean', 'apparent'))
def test_lst(self, kind):
"""Compare Local Sidereal Time with what was found earlier,
as well as with what is expected from GMST
"""
lst_compare = {
'mean': np.array([14.596849789473058, 2.629426164144693,
2.6297047024528588, 2.6299832407610033,
14.662838182889967]),
'apparent': np.array([14.597116857049487, 2.6296942208782959,
2.6299727592118565, 2.6302512975453887,
14.663107428401826])}
gmst2 = self.t2.sidereal_time(kind, 'greenwich')
lmst2 = self.t2.sidereal_time(kind)
assert allclose_hours(lmst2.value, lst_compare[kind])
assert allclose_hours((lmst2 - gmst2).wrap_at('12h').value,
self.t2.location.lon.to_value('hourangle'))
# check it also works when one gives longitude explicitly
lmst1 = self.t1.sidereal_time(kind, self.t2.location.lon)
assert allclose_hours(lmst1.value, lst_compare[kind])
def test_lst_string_longitude(self):
lmst1 = self.t1.sidereal_time('mean', longitude='120d')
lmst2 = self.t2.sidereal_time('mean')
assert allclose_hours(lmst1.value, lmst2.value)
def test_lst_needs_location(self):
with pytest.raises(ValueError):
self.t1.sidereal_time('mean')
with pytest.raises(ValueError):
self.t1.sidereal_time('mean', None)
def test_lera(self):
lera_compare = np.array([14.586176631122177, 2.618751847545134,
2.6190303858265067, 2.619308924107852,
14.652162695594276])
gera2 = self.t2.earth_rotation_angle('tio')
lera2 = self.t2.earth_rotation_angle()
assert allclose_hours(lera2.value, lera_compare)
assert allclose_hours((lera2 - gera2).wrap_at('12h').value,
self.t2.location.lon.to_value('hourangle'))
# Check it also works when one gives location explicitly.
# This also verifies input of a location generally.
lera1 = self.t1.earth_rotation_angle(self.t2.location)
assert allclose_hours(lera1.value, lera_compare)
class TestModelInterpretation:
"""Check that models are different, and that wrong models are recognized"""
@classmethod
def setup_class(cls):
cls.orig_auto_download = iers.conf.auto_download
iers.conf.auto_download = False
cls.t = Time(['2012-06-30 12:00:00'], scale='utc',
location=('120d', '10d'))
@classmethod
def teardown_class(cls):
iers.conf.auto_download = cls.orig_auto_download
@pytest.mark.parametrize('kind', ('mean', 'apparent'))
def test_model_uniqueness(self, kind):
"""Check models give different answers, yet are close."""
for model1, model2 in itertools.combinations(
SIDEREAL_TIME_MODELS[kind].keys(), 2):
gst1 = self.t.sidereal_time(kind, 'greenwich', model1)
gst2 = self.t.sidereal_time(kind, 'greenwich', model2)
assert np.all(gst1.value != gst2.value)
assert within_1_second(gst1.value, gst2.value)
lst1 = self.t.sidereal_time(kind, None, model1)
lst2 = self.t.sidereal_time(kind, None, model2)
assert np.all(lst1.value != lst2.value)
assert within_1_second(lst1.value, lst2.value)
@pytest.mark.parametrize(('kind', 'other'), (('mean', 'apparent'),
('apparent', 'mean')))
def test_wrong_models_raise_exceptions(self, kind, other):
with pytest.raises(ValueError):
self.t.sidereal_time(kind, 'greenwich', 'nonsense')
for model in (set(SIDEREAL_TIME_MODELS[other].keys())
- set(SIDEREAL_TIME_MODELS[kind].keys())):
with pytest.raises(ValueError):
self.t.sidereal_time(kind, 'greenwich', model)
with pytest.raises(ValueError):
self.t.sidereal_time(kind, None, model)
|
e91a4c0ed98ee734d8b02abeac85f52a9bae5776a795e9e0bf41dbdce20d38b2 | # Licensed under a 3-clause BSD style license - see LICNSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""Handles the CDS string format for units."""
import operator
import os
import re
from astropy.units.utils import is_effectively_unity
from astropy.utils import classproperty, parsing
from astropy.utils.misc import did_you_mean
from . import core, utils
from .base import Base
class CDS(Base):
"""
Support the `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical
Catalogues 2.0 <http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_
format, and the `complete set of supported units
<https://vizier.u-strasbg.fr/viz-bin/Unit>`_. This format is used
by VOTable up to version 1.2.
"""
_tokens = (
'PRODUCT',
'DIVISION',
'OPEN_PAREN',
'CLOSE_PAREN',
'OPEN_BRACKET',
'CLOSE_BRACKET',
'X',
'SIGN',
'UINT',
'UFLOAT',
'UNIT',
'DIMENSIONLESS'
)
@classproperty(lazy=True)
def _units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import cds
names = {}
for key, val in cds.__dict__.items():
if isinstance(val, u.UnitBase):
names[key] = val
return names
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_PRODUCT = r'\.'
t_DIVISION = r'/'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_OPEN_BRACKET = r'\['
t_CLOSE_BRACKET = r'\]'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?'
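            # A match containing no '.' or exponent is really a multi-digit
            # integer; downgrade the token so the parser sees a UINT.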
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
else:
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
def t_X(t): # multiplication for factor in front of unit
r'[x×]'
return t
def t_UNIT(t):
r'\%|°|\\h|((?!\d)\w)+'
t.value = cls._get_unit(t)
return t
def t_DIMENSIONLESS(t):
r'---|-'
# These are separate from t_UNIT since they cannot have a prefactor.
t.value = cls._get_unit(t)
return t
t_ignore = ''
# Error handling rule
def t_error(t):
raise ValueError(
f"Invalid character at col {t.lexpos}")
return parsing.lex(lextab='cds_lextab', package='astropy/units',
reflags=int(re.UNICODE))
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `Standards
for Astronomical Catalogues 2.0
<http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_, which is not
        terribly precise. The exact grammar here is based on the
YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
'''
main : factor combined_units
| combined_units
| DIMENSIONLESS
| OPEN_BRACKET combined_units CLOSE_BRACKET
| OPEN_BRACKET DIMENSIONLESS CLOSE_BRACKET
| factor
'''
from astropy.units import dex
from astropy.units.core import Unit
if len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = dex(p[2])
else:
p[0] = Unit(p[1])
def p_combined_units(p):
'''
combined_units : product_of_units
| division_of_units
'''
p[0] = p[1]
def p_product_of_units(p):
'''
product_of_units : unit_expression PRODUCT combined_units
| unit_expression
'''
if len(p) == 4:
p[0] = p[1] * p[3]
else:
p[0] = p[1]
def p_division_of_units(p):
'''
division_of_units : DIVISION unit_expression
| unit_expression DIVISION combined_units
'''
if len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1] / p[3]
def p_unit_expression(p):
'''
unit_expression : unit_with_power
| OPEN_PAREN combined_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_factor(p):
'''
factor : signed_float X UINT signed_int
| UINT X UINT signed_int
| UINT signed_int
| UINT
| signed_float
'''
if len(p) == 5:
if p[3] != 10:
raise ValueError(
"Only base ten exponents are allowed in CDS")
p[0] = p[1] * 10.0 ** p[4]
elif len(p) == 3:
if p[1] != 10:
raise ValueError(
"Only base ten exponents are allowed in CDS")
p[0] = 10.0 ** p[2]
elif len(p) == 2:
p[0] = p[1]
def p_unit_with_power(p):
'''
unit_with_power : UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] ** p[2]
def p_numeric_power(p):
'''
numeric_power : sign UINT
'''
p[0] = p[1] * p[2]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule='cds_parsetab', package='astropy/units')
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
registry = core.get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(
f"At col {t.lexpos}, {str(e)}")
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the CDS SAC "
"standard. {}".format(
unit, did_you_mean(
unit, cls._units)))
else:
raise ValueError()
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
        # Decode bytes first so the whitespace check cannot raise TypeError.
        if not isinstance(s, str):
            s = s.decode('ascii')
        if ' ' in s:
            raise ValueError('CDS unit must not contain whitespace')
# This is a short circuit for the case where the string
# is just a single unit name
try:
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError(str(e))
else:
raise ValueError("Syntax error")
@staticmethod
def _get_unit_name(unit):
return unit.get_format_name('cds')
@classmethod
def _format_unit_list(cls, units):
out = []
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
out.append(f'{cls._get_unit_name(base)}{int(power)}')
return '.'.join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit == core.dimensionless_unscaled:
return '---'
elif is_effectively_unity(unit.scale*100.):
return '%'
if unit.scale == 1:
s = ''
else:
m, e = utils.split_mantissa_exponent(unit.scale)
parts = []
if m not in ('', '1'):
parts.append(m)
if e:
if not e.startswith('-'):
e = "+" + e
parts.append(f'10{e}')
s = 'x'.join(parts)
pairs = list(zip(unit.bases, unit.powers))
if len(pairs) > 0:
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
|
c18a6b2eaf36843eeede9d26c15910a798064e0fb1c7cd3dcaaee40111b6701c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "Console" unit format.
"""
from . import base, core, utils
class Console(base.Base):
"""
    Output-only format to display pretty formatting at the
    console.
For example::
>>> import astropy.units as u
>>> print(u.Ry.decompose().to_string('console')) # doctest: +FLOAT_CMP
m^2 kg
2.1798721*10^-18 ------
s^2
"""
_times = "*"
_line = "-"
@classmethod
def _get_unit_name(cls, unit):
return unit.get_format_name('console')
@classmethod
def _format_superscript(cls, number):
return f'^{number}'
@classmethod
def _format_unit_list(cls, units):
out = []
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
out.append('{}{}'.format(
cls._get_unit_name(base),
cls._format_superscript(
utils.format_power(power))))
return ' '.join(out)
@classmethod
def format_exponential_notation(cls, val):
m, ex = utils.split_mantissa_exponent(val)
parts = []
if m:
parts.append(m)
if ex:
parts.append(f"10{cls._format_superscript(ex)}")
return cls._times.join(parts)
@classmethod
def to_string(cls, unit):
if isinstance(unit, core.CompositeUnit):
if unit.scale == 1:
s = ''
else:
s = cls.format_exponential_notation(unit.scale)
if len(unit.bases):
positives, negatives = utils.get_grouped_by_powers(
unit.bases, unit.powers)
if len(negatives):
if len(positives):
positives = cls._format_unit_list(positives)
else:
positives = '1'
negatives = cls._format_unit_list(negatives)
l = len(s)
r = max(len(positives), len(negatives))
f = f"{{0:^{l}s}} {{1:^{r}s}}"
lines = [
f.format('', positives),
f.format(s, cls._line * r),
f.format('', negatives)
]
s = '\n'.join(lines)
else:
positives = cls._format_unit_list(positives)
s += positives
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
|
7381d22354adc4a6f652df06141e5537ebce362843821741b25bca1c242e2763 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "VOUnit" unit format.
"""
import copy
import keyword
import operator
import re
import warnings
from . import core, generic, utils
class VOUnit(generic.Generic):
"""
The IVOA standard for units used by the VO.
This is an implementation of `Units in the VO 1.0
<http://www.ivoa.net/documents/VOUnits/>`_.
"""
_explicit_custom_unit_regex = re.compile(
r"^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$")
_custom_unit_regex = re.compile(r"^((?!\d)\w)+$")
_custom_units = {}
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import required_by_vounit as uvo
names = {}
deprecated_names = set()
bases = [
'A', 'C', 'D', 'F', 'G', 'H', 'Hz', 'J', 'Jy', 'K', 'N',
'Ohm', 'Pa', 'R', 'Ry', 'S', 'T', 'V', 'W', 'Wb', 'a',
'adu', 'arcmin', 'arcsec', 'barn', 'beam', 'bin', 'cd',
'chan', 'count', 'ct', 'd', 'deg', 'eV', 'erg', 'g', 'h',
'lm', 'lx', 'lyr', 'm', 'mag', 'min', 'mol', 'pc', 'ph',
            'photon', 'pix', 'pixel', 'rad', 's', 'solLum',
'solMass', 'solRad', 'sr', 'u', 'voxel', 'yr'
]
binary_bases = [
'bit', 'byte', 'B'
]
simple_units = [
'Angstrom', 'angstrom', 'AU', 'au', 'Ba', 'dB', 'mas'
]
si_prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
]
binary_prefixes = [
'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei'
]
deprecated_units = {
'a', 'angstrom', 'Angstrom', 'au', 'Ba', 'barn', 'ct',
'erg', 'G', 'ph', 'pix'
}
def do_defines(bases, prefixes, skips=[]):
for base in bases:
for prefix in prefixes:
key = prefix + base
if key in skips:
continue
if keyword.iskeyword(key):
continue
names[key] = getattr(u if hasattr(u, key) else uvo, key)
if base in deprecated_units:
deprecated_names.add(key)
do_defines(bases, si_prefixes, ['pct', 'pcount', 'yd'])
do_defines(binary_bases, si_prefixes + binary_prefixes, ['dB', 'dbyte'])
do_defines(simple_units, [''])
return names, deprecated_names, []
@classmethod
def parse(cls, s, debug=False):
if s in ('unknown', 'UNKNOWN'):
return None
if s == '':
return core.dimensionless_unscaled
        # Check for excess solidi, but exclude fractional exponents such as
        # '(3/2)', which the standard allows.
if (s.count('/') > 1 and
s.count('/') - len(re.findall(r'\(\d+/\d+\)', s)) > 1):
raise core.UnitsError(
"'{}' contains multiple slashes, which is "
"disallowed by the VOUnit standard".format(s))
result = cls._do_parse(s, debug=debug)
if hasattr(result, 'function_unit'):
raise ValueError("Function units are not yet supported in "
"VOUnit.")
return result
@classmethod
def _get_unit(cls, t):
try:
return super()._get_unit(t)
except ValueError:
if cls._explicit_custom_unit_regex.match(t.value):
return cls._def_custom_unit(t.value)
if cls._custom_unit_regex.match(t.value):
warnings.warn(
"Unit {!r} not supported by the VOUnit "
"standard. {}".format(
t.value, utils.did_you_mean_units(
t.value, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)),
core.UnitsWarning)
return cls._def_custom_unit(t.value)
raise
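# Behavior sketch for the two fallback paths above (illustrative): a quoted
# token such as "'crab'" matches _explicit_custom_unit_regex and is defined
# silently, while a bare unknown word is defined only after emitting a
# UnitsWarning:
#
#     >>> u.Unit('furlong', format='vounit')  # doctest: +SKIP
#     WARNING: UnitsWarning: Unit 'furlong' not supported by the VOUnit standard. ...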
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'VOUnit',
cls._to_decomposed_alternative)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
# The da- and d- prefixes are discouraged. This has the
        # effect of adding a scale to the value in the result.
if isinstance(unit, core.PrefixUnit):
if unit._represents.scale == 10.0:
raise ValueError(
"In '{}': VOUnit can not represent units with the 'da' "
"(deka) prefix".format(unit))
elif unit._represents.scale == 0.1:
raise ValueError(
"In '{}': VOUnit can not represent units with the 'd' "
"(deci) prefix".format(unit))
name = unit.get_format_name('vounit')
if unit in cls._custom_units.values():
return name
if name not in cls._units:
raise ValueError(
f"Unit {name!r} is not part of the VOUnit standard")
if name in cls._deprecated_units:
utils.unit_deprecation_warning(
name, unit, 'VOUnit',
cls._to_decomposed_alternative)
return name
@classmethod
def _def_custom_unit(cls, unit):
def def_base(name):
if name in cls._custom_units:
return cls._custom_units[name]
if name.startswith("'"):
return core.def_unit(
[name[1:-1], name],
format={'vounit': name},
namespace=cls._custom_units)
else:
return core.def_unit(
name, namespace=cls._custom_units)
if unit in cls._custom_units:
return cls._custom_units[unit]
for short, full, factor in core.si_prefixes:
for prefix in short:
if unit.startswith(prefix):
base_name = unit[len(prefix):]
base_unit = def_base(base_name)
return core.PrefixUnit(
[prefix + x for x in base_unit.names],
core.CompositeUnit(factor, [base_unit], [1],
_error_check=False),
format={'vounit': prefix + base_unit.names[-1]},
namespace=cls._custom_units)
return def_base(unit)
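# Illustrative: an unrecognized unit that starts with a known SI prefix is
# split into prefix + custom base, so a (hypothetical) 'kfurlong' becomes a
# PrefixUnit representing 1000 of the custom base unit 'furlong':
#
#     >>> kf = VOUnit._def_custom_unit('kfurlong')  # doctest: +SKIP
#     >>> kf.represents.scale
#     1000.0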
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power or '.' in power:
out.append(f'{cls._get_unit_name(base)}({power})')
else:
out.append(f'{cls._get_unit_name(base)}**{power}')
return '.'.join(out)
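# Output sketch for the branches above (illustrative): powers of 1 print
# bare, integer powers use '**', fractional powers are parenthesized, and
# factors are joined with '.':
#
#     >>> from astropy import units as u
#     >>> from fractions import Fraction
#     >>> VOUnit._format_unit_list([(u.m, Fraction(1, 2)), (u.s, -1)])  # doctest: +SKIP
#     'm(1/2).s**-1'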
@classmethod
def to_string(cls, unit):
from astropy.units import core
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit.physical_type == 'dimensionless' and unit.scale != 1:
raise core.UnitScaleError(
"The VOUnit format is not able to "
"represent scale for dimensionless units. "
"Multiply your data by {:e}."
.format(unit.scale))
s = ''
if unit.scale != 1:
s += f'{unit.scale:.8g}'
pairs = list(zip(unit.bases, unit.powers))
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
from astropy.units import core
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return f'{cls.to_string(unit)} (with data multiplied by {scale})'
return s
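# Illustrative: a scaled dimensionless unit cannot be written in VOUnit, so
# the fallback above zeroes out the scale and reports it alongside the
# string instead of raising:
#
#     >>> VOUnit._to_decomposed_alternative(u.Unit(0.5))  # doctest: +SKIP
#     ' (with data multiplied by 0.5)'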
|
3a717c8ceb7f9a2ecfe283f660fa2c8c5e16e15ba4cb0c586082f34124ac61c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "Unicode" unit format.
"""
from . import console, utils
class Unicode(console.Console):
"""
Output-only format to display pretty formatting at the console
using Unicode characters.
For example::
>>> import astropy.units as u
>>> print(u.bar.decompose().to_string('unicode'))
            kg
    100000 ────
           m s²
"""
_times = "×"
_line = "─"
@classmethod
def _get_unit_name(cls, unit):
return unit.get_format_name('unicode')
@classmethod
def format_exponential_notation(cls, val):
m, ex = utils.split_mantissa_exponent(val)
parts = []
if m:
parts.append(m.replace('-', '−'))
if ex:
parts.append(f"10{cls._format_superscript(ex)}")
return cls._times.join(parts)
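# Illustrative (assuming split_mantissa_exponent returns ('2.5', '-10') for
# this value): the mantissa and exponent render separately and are joined
# with the Unicode multiplication sign:
#
#     >>> Unicode.format_exponential_notation(2.5e-10)  # doctest: +SKIP
#     '2.5×10⁻¹⁰'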
@classmethod
def _format_superscript(cls, number):
mapping = {
'0': '⁰',
'1': '¹',
'2': '²',
'3': '³',
'4': '⁴',
'5': '⁵',
'6': '⁶',
'7': '⁷',
'8': '⁸',
'9': '⁹',
'-': '⁻',
'−': '⁻',
# This is actually a "raised omission bracket", but it's
# the closest thing I could find to a superscript solidus.
'/': '⸍',
}
output = []
for c in number:
output.append(mapping[c])
return ''.join(output)
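# Illustrative: the mapping above is applied one character at a time, so a
# signed power such as '-2' becomes its superscript form:
#
#     >>> Unicode._format_superscript('-2')  # doctest: +SKIP
#     '⁻²'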
|
5e257f847eca9c9432f227b47c7c92bbc4440220fbfa78bd9241294992996c27 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
import copy
import keyword
import math
import os
import warnings
from fractions import Fraction
from astropy.utils import parsing
from . import core, generic, utils
class OGIP(generic.Generic):
"""
Support the units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
_tokens = (
'DIVISION',
'OPEN_PAREN',
'CLOSE_PAREN',
'WHITESPACE',
'STARSTAR',
'STAR',
'SIGN',
'UFLOAT',
'LIT10',
'UINT',
'UNKNOWN',
'UNIT'
)
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
bases = [
'A', 'C', 'cd', 'eV', 'F', 'g', 'H', 'Hz', 'J',
'Jy', 'K', 'lm', 'lx', 'm', 'mol', 'N', 'ohm', 'Pa',
'pc', 'rad', 's', 'S', 'sr', 'T', 'V', 'W', 'Wb'
]
deprecated_bases = []
prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
]
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
'angstrom', 'arcmin', 'arcsec', 'AU', 'barn', 'bin',
'byte', 'chan', 'count', 'day', 'deg', 'erg', 'G',
'h', 'lyr', 'mag', 'min', 'photon', 'pixel',
'voxel', 'yr'
]
for unit in simple_units:
names[unit] = getattr(u, unit)
# Create a separate, disconnected unit for the special case of
# Crab and mCrab, since OGIP doesn't define their quantities.
Crab = u.def_unit(['Crab'], prefixes=False, doc='Crab (X-ray flux)')
mCrab = u.Unit(10 ** -3 * Crab)
names['Crab'] = Crab
names['mCrab'] = mCrab
deprecated_units = ['Crab', 'mCrab']
for unit in deprecated_units:
deprecated_names.add(unit)
# Define the function names, so we can parse them, even though
# we can't use any of them (other than sqrt) meaningfully for
# now.
functions = [
'log', 'ln', 'exp', 'sqrt', 'sin', 'cos', 'tan', 'asin',
'acos', 'atan', 'sinh', 'cosh', 'tanh'
]
for name in functions:
names[name] = name
return names, deprecated_names, functions
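# Illustrative: 'Crab' and 'mCrab' parse, but both are registered as
# deprecated above, so using one emits a deprecation warning (abridged):
#
#     >>> from astropy import units as u
#     >>> u.Unit('mCrab', format='ogip')  # doctest: +SKIP
#     WARNING: UnitsWarning: The unit 'mCrab' has been deprecated in the OGIP standard. ...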
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_DIVISION = r'/'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_WHITESPACE = '[ \t]+'
t_STARSTAR = r'\*\*'
t_STAR = r'\*'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'(((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+))|(((\d+\.\d*)|(\.\d+))([eE][+-]?\d+)?)'
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
def t_X(t): # multiplication for factor in front of unit
r'[x×]'
return t
def t_LIT10(t):
r'10'
return 10
def t_UNKNOWN(t):
r'[Uu][Nn][Kk][Nn][Oo][Ww][Nn]'
return None
def t_UNIT(t):
r'[a-zA-Z][a-zA-Z_]*'
t.value = cls._get_unit(t)
return t
# Don't ignore whitespace
t_ignore = ''
# Error handling rule
def t_error(t):
raise ValueError(
f"Invalid character at col {t.lexpos}")
return parsing.lex(lextab='ogip_lextab', package='astropy/units')
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the
`Specification of Physical Units within OGIP FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__,
        which is not terribly precise. The exact grammar here is
based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
'''
main : UNKNOWN
| complete_expression
| scale_factor complete_expression
| scale_factor WHITESPACE complete_expression
'''
if len(p) == 4:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_complete_expression(p):
'''
complete_expression : product_of_units
'''
p[0] = p[1]
def p_product_of_units(p):
'''
product_of_units : unit_expression
| division unit_expression
| product_of_units product unit_expression
| product_of_units division unit_expression
'''
if len(p) == 4:
if p[2] == 'DIVISION':
p[0] = p[1] / p[3]
else:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1]
def p_unit_expression(p):
'''
unit_expression : unit
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN
| OPEN_PAREN complete_expression CLOSE_PAREN
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
| OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
'''
# If we run p[1] in cls._functions, it will try and parse each
# item in the list into a unit, which is slow. Since we know that
# all the items in the list are strings, we can simply convert
# p[1] to a string instead.
p1_str = str(p[1])
if p1_str in cls._functions and p1_str != 'sqrt':
raise ValueError(
"The function '{}' is valid in OGIP, but not understood "
"by astropy.units.".format(
p[1]))
if len(p) == 7:
if p1_str == 'sqrt':
p[0] = p[1] * p[3] ** (0.5 * p[6])
else:
p[0] = p[1] * p[3] ** p[6]
elif len(p) == 6:
p[0] = p[2] ** p[5]
elif len(p) == 5:
if p1_str == 'sqrt':
p[0] = p[3] ** 0.5
else:
p[0] = p[1] * p[3]
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_scale_factor(p):
'''
scale_factor : LIT10 power numeric_power
| LIT10
| signed_float
| signed_float power numeric_power
| signed_int power numeric_power
'''
if len(p) == 4:
p[0] = 10 ** p[3]
else:
p[0] = p[1]
            # Can't use np.log10 here, because p[0] may be an arbitrary-precision Python int.
if math.log10(p[0]) % 1.0 != 0.0:
from astropy.units.core import UnitsWarning
warnings.warn(
"'{}' scale should be a power of 10 in "
"OGIP format".format(p[0]), UnitsWarning)
def p_division(p):
'''
division : DIVISION
| WHITESPACE DIVISION
| WHITESPACE DIVISION WHITESPACE
| DIVISION WHITESPACE
'''
p[0] = 'DIVISION'
def p_product(p):
'''
product : WHITESPACE
| STAR
| WHITESPACE STAR
| WHITESPACE STAR WHITESPACE
| STAR WHITESPACE
'''
p[0] = 'PRODUCT'
def p_power(p):
'''
power : STARSTAR
'''
p[0] = 'POWER'
def p_unit(p):
'''
unit : UNIT
| UNIT power numeric_power
'''
if len(p) == 4:
p[0] = p[1] ** p[3]
else:
p[0] = p[1]
def p_numeric_power(p):
'''
numeric_power : UINT
| signed_float
| OPEN_PAREN signed_int CLOSE_PAREN
| OPEN_PAREN signed_float CLOSE_PAREN
| OPEN_PAREN signed_float division UINT CLOSE_PAREN
'''
if len(p) == 6:
p[0] = Fraction(int(p[2]), int(p[4]))
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule='ogip_parsetab', package='astropy/units')
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the OGIP "
"standard. {}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)))
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'OGIP',
cls._to_decomposed_alternative)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
s = s.strip()
try:
# This is a short circuit for the case where the string is
# just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return core.Unit(
cls._parser.parse(s, lexer=cls._lexer, debug=debug))
except ValueError as e:
if str(e):
raise
else:
raise ValueError(
f"Syntax error parsing unit '{s}'")
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('ogip')
cls._validate_unit(name)
return name
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power:
out.append(f'{cls._get_unit_name(base)}**({power})')
else:
out.append(f'{cls._get_unit_name(base)}**{power}')
return ' '.join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
            # Can't use np.log10 here, because unit.scale may be an arbitrary-precision Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
warnings.warn(
f"'{unit.scale}' scale should be a power of 10 in OGIP format",
core.UnitsWarning)
return generic._to_string(cls, unit)
@classmethod
def _to_decomposed_alternative(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
            # Can't use np.log10 here, because unit.scale may be an arbitrary-precision Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{} (with data multiplied by {})'.format(
generic._to_string(cls, unit), scale)
        return generic._to_string(cls, unit)
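# Illustrative: to_string above merely warns when the scale is not a power
# of 10, while _to_decomposed_alternative factors the scale out of the
# returned string entirely (abridged):
#
#     >>> from astropy import units as u
#     >>> unit = u.CompositeUnit(3.0, [u.erg, u.s], [1, -1])
#     >>> OGIP._to_decomposed_alternative(unit)  # doctest: +SKIP
#     'erg / s (with data multiplied by 3.0)'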
|