# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
This module contains utility functions that are for internal use in
astropy.coordinates.angles. Mainly they are conversions from one format
of data to another.
"""
import os
from warnings import warn
import numpy as np
from .errors import (IllegalHourWarning, IllegalHourError,
IllegalMinuteWarning, IllegalMinuteError,
IllegalSecondWarning, IllegalSecondError)
from astropy.utils import format_exception
from astropy import units as u
TAB_HEADER = """# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to this file.
"""
class _AngleParser:
"""
Parses the various angle formats including:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
This class should not be used directly. Use `parse_angle`
instead.
"""
def __init__(self):
# TODO: in principle, the parser should be invalidated if we change unit
# system (from CDS to FITS, say). Might want to keep a link to the
# unit_registry used, and regenerate the parser/lexer if it changes.
# Alternatively, perhaps one should not worry at all and just pre-
# generate the parser for each release (as done for unit formats).
# For some discussion of this problem, see
# https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
if '_parser' not in _AngleParser.__dict__:
_AngleParser._parser, _AngleParser._lexer = self._make_parser()
@classmethod
def _get_simple_unit_names(cls):
simple_units = set(
u.radian.find_equivalent_units(include_prefix_units=True))
simple_unit_names = set()
# We filter out degree and hourangle, since those are treated
# separately.
for unit in simple_units:
if unit != u.deg and unit != u.hourangle:
simple_unit_names.update(unit.names)
return sorted(simple_unit_names)
@classmethod
def _make_parser(cls):
from astropy.extern.ply import lex, yacc
# List of token names.
tokens = (
'SIGN',
'UINT',
'UFLOAT',
'COLON',
'DEGREE',
'HOUR',
'MINUTE',
'SECOND',
'SIMPLE_UNIT'
)
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.\d*)|(\.\d+))([eE][+−-]?\d+)?'
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
t.value = float(t.value.replace('−', '-'))
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+−-]'
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
if t.value == '+':
t.value = 1.0
else:
t.value = -1.0
return t
def t_SIMPLE_UNIT(t):
t.value = u.Unit(t.value)
return t
t_SIMPLE_UNIT.__doc__ = '|'.join(
'(?:{0})'.format(x) for x in cls._get_simple_unit_names())
t_COLON = ':'
t_DEGREE = r'd(eg(ree(s)?)?)?|°'
t_HOUR = r'hour(s)?|h(r)?|ʰ'
t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ'
t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ'
# A string containing ignored characters (spaces)
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'angle_lextab.py'))
# Build the lexer
lexer = lex.lex(optimize=True, lextab='angle_lextab',
outputdir=os.path.dirname(__file__))
if not lexer_exists:
cls._add_tab_header('angle_lextab')
def p_angle(p):
'''
angle : hms
| dms
| arcsecond
| arcminute
| simple
'''
p[0] = p[1]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_ufloat(p):
'''
ufloat : UFLOAT
| UINT
'''
p[0] = float(p[1])
def p_colon(p):
'''
colon : sign UINT COLON ufloat
| sign UINT COLON UINT COLON ufloat
'''
if len(p) == 5:
p[0] = (p[1] * p[2], p[4])
elif len(p) == 7:
p[0] = (p[1] * p[2], p[4], p[6])
def p_spaced(p):
'''
spaced : sign UINT ufloat
| sign UINT UINT ufloat
'''
if len(p) == 4:
p[0] = (p[1] * p[2], p[3])
elif len(p) == 5:
p[0] = (p[1] * p[2], p[3], p[4])
def p_generic(p):
'''
generic : colon
| spaced
| sign UFLOAT
| sign UINT
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] * p[2]
def p_hms(p):
'''
hms : sign UINT HOUR
| sign UINT HOUR ufloat
| sign UINT HOUR UINT MINUTE
| sign UINT HOUR UFLOAT MINUTE
| sign UINT HOUR UINT MINUTE ufloat
| sign UINT HOUR UINT MINUTE ufloat SECOND
| generic HOUR
'''
if len(p) == 3:
p[0] = (p[1], u.hourangle)
elif len(p) == 4:
p[0] = (p[1] * p[2], u.hourangle)
elif len(p) in (5, 6):
p[0] = ((p[1] * p[2], p[4]), u.hourangle)
elif len(p) in (7, 8):
p[0] = ((p[1] * p[2], p[4], p[6]), u.hourangle)
def p_dms(p):
'''
dms : sign UINT DEGREE
| sign UINT DEGREE ufloat
| sign UINT DEGREE UINT MINUTE
| sign UINT DEGREE UFLOAT MINUTE
| sign UINT DEGREE UINT MINUTE ufloat
| sign UINT DEGREE UINT MINUTE ufloat SECOND
| generic DEGREE
'''
if len(p) == 3:
p[0] = (p[1], u.degree)
elif len(p) == 4:
p[0] = (p[1] * p[2], u.degree)
elif len(p) in (5, 6):
p[0] = ((p[1] * p[2], p[4]), u.degree)
elif len(p) in (7, 8):
p[0] = ((p[1] * p[2], p[4], p[6]), u.degree)
def p_simple(p):
'''
simple : generic
| generic SIMPLE_UNIT
'''
if len(p) == 2:
p[0] = (p[1], None)
else:
p[0] = (p[1], p[2])
def p_arcsecond(p):
'''
arcsecond : generic SECOND
'''
p[0] = (p[1], u.arcsecond)
def p_arcminute(p):
'''
arcminute : generic MINUTE
'''
p[0] = (p[1], u.arcminute)
def p_error(p):
raise ValueError
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'angle_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='angle_parsetab',
outputdir=os.path.dirname(__file__),
write_tables=True)
if not parser_exists:
cls._add_tab_header('angle_parsetab')
return parser, lexer
@classmethod
def _add_tab_header(cls, name):
lextab_file = os.path.join(os.path.dirname(__file__), name + '.py')
with open(lextab_file, 'r') as f:
contents = f.read()
with open(lextab_file, 'w') as f:
f.write(TAB_HEADER)
f.write(contents)
def parse(self, angle, unit, debug=False):
try:
found_angle, found_unit = self._parser.parse(
angle, lexer=self._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError("{0} in angle {1!r}".format(
str(e), angle))
else:
raise ValueError(
"Syntax error parsing angle {0!r}".format(angle))
if unit is None and found_unit is None:
raise u.UnitsError("No unit specified")
return found_angle, found_unit
def _check_hour_range(hrs):
"""
Checks that the given value is in the range [-24, 24]. If the value
is equal to ±24, then a warning is raised.
"""
if np.any(np.abs(hrs) == 24.):
warn(IllegalHourWarning(hrs, 'Treating as 24 hr'))
elif np.any(hrs < -24.) or np.any(hrs > 24.):
raise IllegalHourError(hrs)
def _check_minute_range(m):
"""
Checks that the given value is in the range [-60, 60]. If the value
is equal to 60, then a warning is raised.
"""
if np.any(m == 60.):
warn(IllegalMinuteWarning(m, 'Treating as 0 min, +1 hr/deg'))
elif np.any(m < -60.) or np.any(m > 60.):
# "Error: minutes not in range [-60,60) ({0}).".format(min))
raise IllegalMinuteError(m)
def _check_second_range(sec):
"""
Checks that the given value is in the range [-60, 60]. If the value
is equal to 60, then a warning is raised.
"""
if np.any(sec == 60.):
warn(IllegalSecondWarning(sec, 'Treating as 0 sec, +1 min'))
elif sec is None:
pass
elif np.any(sec < -60.) or np.any(sec > 60.):
# "Error: seconds not in range [-60,60) ({0}).".format(sec))
raise IllegalSecondError(sec)
def check_hms_ranges(h, m, s):
"""
Checks that the given hour, minute and second are all within
reasonable range.
"""
_check_hour_range(h)
_check_minute_range(m)
_check_second_range(s)
return None
def parse_angle(angle, unit=None, debug=False):
"""
Parses an input string value into an angle value.
Parameters
----------
angle : str
A string representing the angle. May be in one of the following forms:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
unit : `~astropy.units.UnitBase` instance, optional
The unit used to interpret the string. If ``unit`` is not
provided, the unit must be explicitly represented in the
string, either at the end or as number separators.
debug : bool, optional
If `True`, print debugging information from the parser.
Returns
-------
value, unit : tuple
``value`` is the value as a floating point number or three-part
tuple, and ``unit`` is a `Unit` instance which is either the
unit passed in or the one explicitly mentioned in the input
string.
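Examples
--------
A minimal, illustrative sketch; the return values shown are inferred
from the grammar above rather than captured output, hence the skipped
doctests::

    >>> parse_angle('10.2345d')  # doctest: +SKIP
    (10.2345, Unit("deg"))
    >>> parse_angle('1d2m3.4s')  # doctest: +SKIP
    ((1.0, 2.0, 3.4), Unit("deg"))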
"""
return _AngleParser().parse(angle, unit, debug=debug)
def degrees_to_dms(d):
"""
Convert a floating-point degree value into a ``(degree, arcminute,
arcsecond)`` tuple.
"""
sign = np.copysign(1.0, d)
(df, d) = np.modf(np.abs(d)) # (degree fraction, degree)
(mf, m) = np.modf(df * 60.) # (minute fraction, minute)
s = mf * 60.
return np.floor(sign * d), sign * np.floor(m), sign * s
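# Illustrative sketch of the sign handling above (values follow from
# the np.copysign/np.modf logic, not captured from a run):
#     degrees_to_dms(-11.5)  ->  (-11.0, -30.0, -0.0)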
def dms_to_degrees(d, m, s=None):
"""
Convert degrees, arcminute, arcsecond to a float degrees value.
"""
_check_minute_range(m)
_check_second_range(s)
# determine sign
sign = np.copysign(1.0, d)
try:
d = np.floor(np.abs(d))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError:
raise ValueError(format_exception(
"{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.", d, m, s))
return sign * (d + m / 60. + s / 3600.)
def hms_to_hours(h, m, s=None):
"""
Convert hour, minute, second to a float hour value.
"""
check_hms_ranges(h, m, s)
# determine sign
sign = np.copysign(1.0, h)
try:
h = np.floor(np.abs(h))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError:
raise ValueError(format_exception(
"{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.", h, m, s))
return sign * (h + m / 60. + s / 3600.)
def hms_to_degrees(h, m, s):
"""
Convert hour, minute, second to a float degrees value.
"""
return hms_to_hours(h, m, s) * 15.
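# Worked example of the factor of 15 above (1 hour of right ascension
# equals 15 degrees): hms_to_degrees(1, 30, 0) -> 1.5 * 15 = 22.5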
def hms_to_radians(h, m, s):
"""
Convert hour, minute, second to a float radians value.
"""
return u.degree.to(u.radian, hms_to_degrees(h, m, s))
def hms_to_dms(h, m, s):
"""
Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s))
def hours_to_decimal(h):
"""
Convert any parseable hour value into a float value.
"""
from . import angles
return angles.Angle(h, unit=u.hourangle).hour
def hours_to_radians(h):
"""
Convert an angle in Hours to Radians.
"""
return u.hourangle.to(u.radian, h)
def hours_to_hms(h):
"""
Convert a floating-point hour value into an ``(hour, minute,
second)`` tuple.
"""
sign = np.copysign(1.0, h)
(hf, h) = np.modf(np.abs(h)) # (hour fraction, hour)
(mf, m) = np.modf(hf * 60.0) # (minute fraction, minute)
s = mf * 60.0
return (np.floor(sign * h), sign * np.floor(m), sign * s)
def radians_to_degrees(r):
"""
Convert an angle in Radians to Degrees.
"""
return u.radian.to(u.degree, r)
def radians_to_hours(r):
"""
Convert an angle in Radians to Hours.
"""
return u.radian.to(u.hourangle, r)
def radians_to_hms(r):
"""
Convert an angle in Radians to an ``(hour, minute, second)`` tuple.
"""
hours = radians_to_hours(r)
return hours_to_hms(hours)
def radians_to_dms(r):
"""
Convert an angle in Radians to an ``(degree, arcminute,
arcsecond)`` tuple.
"""
degrees = u.radian.to(u.degree, r)
return degrees_to_dms(degrees)
def sexagesimal_to_string(values, precision=None, pad=False, sep=(':',),
fields=3):
"""
Given an already separated tuple of sexagesimal values, returns
a string.
See `hours_to_string` and `degrees_to_string` for a higher-level
interface to this functionality.
"""
# Check to see if values[0] is negative, using np.copysign to handle -0
sign = np.copysign(1.0, values[0])
# If the coordinates are negative, we need to take the absolute values.
# We use np.abs because abs(-0) is -0
# TODO: Is this true? (MHvK, 2018-02-01: not on my system)
values = [np.abs(value) for value in values]
if pad:
if sign == -1:
pad = 3
else:
pad = 2
else:
pad = 0
if not isinstance(sep, tuple):
sep = tuple(sep)
if fields < 1 or fields > 3:
raise ValueError(
"fields must be 1, 2, or 3")
if not sep: # empty string, False, or None, etc.
sep = ('', '', '')
elif len(sep) == 1:
if fields == 3:
sep = sep + (sep[0], '')
elif fields == 2:
sep = sep + ('', '')
else:
sep = ('', '', '')
elif len(sep) == 2:
sep = sep + ('',)
elif len(sep) != 3:
raise ValueError(
"Invalid separator specification for converting angle to string.")
# Simplify the expression based on the requested precision. For
# example, if the seconds will round up to 60, we should convert
# it to 0 and carry upwards. If the field is hidden (by the
# fields kwarg) we round up around the middle, 30.0.
if precision is None:
rounding_thresh = 60.0 - (10.0 ** -4)
else:
rounding_thresh = 60.0 - (10.0 ** -precision)
if fields == 3 and values[2] >= rounding_thresh:
values[2] = 0.0
values[1] += 1.0
elif fields < 3 and values[2] >= 30.0:
values[1] += 1.0
if fields >= 2 and values[1] >= 60.0:
values[1] = 0.0
values[0] += 1.0
elif fields < 2 and values[1] >= 30.0:
values[0] += 1.0
literal = []
last_value = ''
literal.append('{0:0{pad}.0f}{sep[0]}')
if fields >= 2:
literal.append('{1:02d}{sep[1]}')
if fields == 3:
if precision is None:
last_value = '{0:.4f}'.format(abs(values[2]))
last_value = last_value.rstrip('0').rstrip('.')
else:
last_value = '{0:.{precision}f}'.format(
abs(values[2]), precision=precision)
if len(last_value) == 1 or last_value[1] == '.':
last_value = '0' + last_value
literal.append('{last_value}{sep[2]}')
literal = ''.join(literal)
return literal.format(np.copysign(values[0], sign),
int(values[1]), values[2],
sep=sep, pad=pad,
last_value=last_value)
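# Illustrative sketch (output assumed from the formatting logic above,
# not captured from a run):
#     sexagesimal_to_string((12.0, 30.0, 45.0), precision=1,
#                           sep=('d', 'm', 's'))  ->  '12d30m45.0s'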
def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's'),
fields=3):
"""
Takes a decimal hour value and returns a string formatted as hms with
separator specified by the 'sep' parameter.
``h`` must be a scalar.
"""
h, m, s = hours_to_hms(h)
return sexagesimal_to_string((h, m, s), precision=precision, pad=pad,
sep=sep, fields=fields)
def degrees_to_string(d, precision=5, pad=False, sep=':', fields=3):
"""
Takes a decimal degree value and returns a string formatted as dms with
separator specified by the 'sep' parameter.
``d`` must be a scalar.
"""
d, m, s = degrees_to_dms(d)
return sexagesimal_to_string((d, m, s), precision=precision, pad=pad,
sep=sep, fields=fields)
def angular_separation(lon1, lat1, lon2, lat2):
"""
Angular separation between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
angular separation : `~astropy.units.Quantity` or float
Type depends on input; `Quantity` in angular units, or float in
radians.
Notes
-----
The angular separation is calculated using the Vincenty formula [1]_,
which is slightly more complex and computationally expensive than
some alternatives, but is stable at all distances, including the
poles and antipodes.
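Written out against the implementation below (longitudes
:math:`\lambda_i`, latitudes :math:`\phi_i`, and
:math:`\Delta\lambda = \lambda_2 - \lambda_1`), the formula is

.. math::

    \Delta\sigma = \operatorname{arctan2}\Bigl(
        \sqrt{(\cos\phi_2 \sin\Delta\lambda)^2 +
              (\cos\phi_1 \sin\phi_2 -
               \sin\phi_1 \cos\phi_2 \cos\Delta\lambda)^2},\;
        \sin\phi_1 \sin\phi_2 +
        \cos\phi_1 \cos\phi_2 \cos\Delta\lambda\Bigr)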
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.arctan2(np.hypot(num1, num2), denominator)
def position_angle(lon1, lat1, lon2, lat2):
"""
Position Angle (East of North) between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from position 1 to
position 2. If any of the angles are arrays, this will contain an array
following the appropriate `numpy` broadcasting rules.
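Examples
--------
A small sanity check (the value follows analytically from the formula
below: a point slightly due east has position angle 90 deg, i.e.
pi/2 rad)::

    >>> position_angle(0., 0., 0.1, 0.)  # doctest: +SKIP
    <Angle 1.57079633 rad>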
"""
from .angles import Angle
deltalon = lon2 - lon1
colat = np.cos(lat2)
x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
y = np.sin(deltalon) * colat
return Angle(np.arctan2(y, x), u.radian).wrap_at(360*u.deg)
def offset_by(lon, lat, posang, distance):
"""
Point with the given offset from the given point.
Parameters
----------
lon, lat, posang, distance : `Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the starting point,
position angle and distance to the final point.
Quantities should be in angular units; floats in radians.
Polar points at lat = +/-90 are treated as the limit of +/-(90 - epsilon), keeping the same lon.
Returns
-------
lon, lat : `~astropy.coordinates.Angle`
The position of the final point. If any of the angles are arrays,
these will contain arrays following the appropriate `numpy` broadcasting rules.
0 <= lon < 2pi.
"""
from .angles import Angle
# Calculations are done using the spherical trigonometry sine and cosine rules
# of the triangle A at North Pole, B at starting point, C at final point
# with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang)
# with sides a (distance), b (final co-latitude), c (starting colatitude)
# B, a, c are knowns; A and b are unknowns
# https://en.wikipedia.org/wiki/Spherical_trigonometry
cos_a = np.cos(distance)
sin_a = np.sin(distance)
cos_c = np.sin(lat)
sin_c = np.cos(lat)
cos_B = np.cos(posang)
sin_B = np.sin(posang)
# cosine rule: Know two sides: a,c and included angle: B; get unknown side b
cos_b = cos_c * cos_a + sin_c * sin_a * cos_B
# sin_b = np.sqrt(1 - cos_b**2)
# sine rule and cosine rule for A (using both lets arctan2 pick quadrant).
# multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors
# at poles. Correct for the x=0 multiplication a few lines down.
# sin_A/sin_a == sin_B/sin_b # Sine rule
xsin_A = sin_a * sin_B * sin_c
# cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule
xcos_A = cos_a - cos_b * cos_c
A = Angle(np.arctan2(xsin_A, xcos_A), u.radian)
# Treat the poles as if they are infinitesimally far from pole but at given lon
# The +0*xsin_A is to broadcast a scalar to vector as necessary
w_pole = np.argwhere((sin_c + 0*xsin_A) < 1e-12)
if len(w_pole) > 0:
# For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang
A_pole = (90*u.deg + cos_c*(90*u.deg-Angle(posang, u.radian))).to(u.rad)
try:
A[w_pole] = A_pole[w_pole]
except TypeError: # scalar
A = A_pole
outlon = (Angle(lon, u.radian) + A).wrap_at(360.0*u.deg).to(u.deg)
outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg)
return outlon, outlat
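# Illustrative sketch (values follow analytically from the spherical
# triangle relations above): starting at (lon=0, lat=0) and moving a
# distance of 45 deg due north (posang=0) gives lon=0 deg, lat=45 deg:
#     offset_by(0., 0., 0., np.pi/4)  ->  (<Angle 0. deg>, <Angle 45. deg>)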
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from warnings import warn
from abc import ABCMeta, abstractmethod
from collections import defaultdict, OrderedDict
from contextlib import suppress
from inspect import signature
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
from .representation import REPRESENTATION_CLASSES
from .matrix_utilities import matrix_product
__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform',
'BaseAffineTransform', 'AffineTransform',
'StaticMatrixTransform', 'DynamicMatrixTransform',
'FunctionTransformWithFiniteDifference', 'CompositeTransform']
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in the given set
of frame classes.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
A `set` of all component names ever defined within any frame class in
the given set of frame classes.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, 'name', None)
if nm is not None:
dct[nm] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this `TransformGraph`.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
A `set` of all component names ever defined within any frame class in
this `TransformGraph`.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""
Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : CoordinateTransform or similar callable
The transformation object. Typically a `CoordinateTransform` object,
although it may be some other callable that is called with the same
signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
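Examples
--------
A schematic sketch (``Frame1``, ``Frame2`` and ``f1_to_f2`` are
hypothetical)::

    graph = TransformGraph()
    trans = FunctionTransform(f1_to_f2, Frame1, Frame2)
    graph.add_transform(Frame1, Frame2, trans)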
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
if not callable(transform):
raise TypeError('transform must be callable')
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError("Frame(s) {0} contain invalid attribute names: {1}"
"\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
.format(list(invalid_frames), invalid_attrs))
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or `None`
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or `None`
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or `None`
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError('fromsys and tosys must both be None if either are')
if transform is None:
raise ValueError('cannot give all Nones to remove_transform')
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if b is transform:
del agraph[b]
break
else:
raise ValueError('Could not find transform {0} in the '
'graph'.format(transform))
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError('Current transform from {0} to {1} is not '
'{2}'.format(fromsys, tosys, transform))
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of classes or `None`
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : number
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float('inf')
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1)
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
if n2 not in result: # n2 not yet visited
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError('n2 not in heap - this should be impossible!')
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""
Generates and returns the `CompositeTransform` for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `CompositeTransform` or `None`
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
This function always returns a `CompositeTransform`, because
`CompositeTransform` is slightly more adaptable in the way it can be
called than other transform classes. Specifically, it takes care of
intermediate steps of transformations in a way that is consistent with
1-hop transformations.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys is not a class')
if not inspect.isclass(tosys):
raise TypeError('tosys is not a class')
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(transforms, fromsys, tosys,
register_graph=False)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
coordcls
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available frame names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,
savelayout='plain', saveformat=None, color_edges=True):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : `None` or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = dict([(v, k) for k, v in self._cached_names.items()])
for n in nodes:
if n in invclsaliases:
nodenames.append('{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, invclsaliases[n]))
else:
nodenames.append(n.__name__ + '[ shape=oval ]')
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, 'priority') else 1
color = trans_to_color[transform.__class__] if color_edges else 'black'
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ['digraph AstropyCoordinateTransformGraph {']
lines.append('; '.join(nodenames) + ';')
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = '[ {0} {1} ]'
if priorities:
priority_part = 'label = "{0}"'.format(weights)
else:
priority_part = ''
color_part = 'color = "{0}"'.format(color)
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append('{0} -> {1}{2};'.format(enm1, enm2, labelstr))
lines.append('')
lines.append('overlap=false')
lines.append('}')
dotgraph = '\n'.join(lines)
if savefn is not None:
if savelayout == 'plain':
with open(savefn, 'w') as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append('-T' + saveformat)
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(dotgraph.encode('utf-8'))
if proc.returncode != 0:
raise OSError('problem running graphviz: \n' + stderr.decode('utf-8'))
with open(savefn, 'wb') as f:
f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <http://networkx.lanl.gov/>`_
package installed for this to work.
Returns
-------
nxgraph : `networkx.Graph <http://networkx.lanl.gov/reference/classes.graph.html>`_
This `TransformGraph` as a `networkx.Graph`_.
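Examples
--------
A schematic sketch (assumes networkx is installed; ``graph`` is any
populated `TransformGraph` instance)::

    import networkx as nx
    nxgraph = graph.to_networkx_graph()
    nx.draw_networkx(nxgraph)  # quick matplotlib visualization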
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, 'priority') else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""
A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third
are ``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use `add_transform` instead of
using this decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(func, fromsys, tosys, priority=priority,
register_graph=self, **kwargs)
return func
return deco
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError('fromsys and tosys must be classes')
self.overlapping_frame_attr_names = overlap = []
if (hasattr(fromsys, 'get_frame_attr_names') and
hasattr(tosys, 'get_frame_attr_names')):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.frame_attributes.keys():
if from_nm in tosys.frame_attributes.keys():
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : a TransformGraph object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : fromsys object
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary that ``tosys.get_frame_attr_names()``
returns. Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : tosys object
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
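Examples
--------
A schematic sketch (``Frame1``, ``Frame2`` and the body of the
function are hypothetical)::

    def f1_to_f2(f1_coord, f2_frame):
        new_data = ...  # compute Frame2 data from f1_coord
        return f2_frame.realize_frame(new_data)

    trans = FunctionTransform(f1_to_f2, Frame1, Frame2)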
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError('func must be callable')
with suppress(TypeError):
sig = signature(func)
kinds = [x.kind for x in sig.parameters.values()]
if (len([x for x in kinds if x in (inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD)]) < 2
and inspect.Parameter.VAR_POSITIONAL not in kinds):
raise ValueError('provided function does not accept two arguments')
self.func = func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError('the transformation function yielded {0} but '
'should have been of type {1}'.format(res, self.tosys))
if fromcoord.data.differentials and not res.data.differentials:
warn("Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.", AstropyWarning)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""
A coordinate transformation that works like a `FunctionTransform`, but
computes velocity shifts based on the finite-difference relative to one of
the frame attributes. Note that the transform function should *not* change
the differential at all in this case, as any differentials will be
overridden.
When a differential is in the from coordinate, the finite difference
calculation has two components. The first part is simply the existing
differential, re-oriented (using finite-difference techniques) to
point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
If True, the finite difference is computed as
:math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The latter
case has slightly better performance (and more stable finite difference
behavior).
All other parameters are identical to the initializer for
`FunctionTransform`.
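Examples
--------
A schematic registration sketch (``graph``, ``Frame1`` and ``Frame2``
are hypothetical; the wrapped function only needs to handle positions,
as velocities are reconstructed by finite differencing)::

    @graph.transform(FunctionTransformWithFiniteDifference,
                     Frame1, Frame2,
                     finite_difference_frameattr_name='obstime')
    def frame1_to_frame2(from_coo, to_frame):
        ...  # position-only transformation
        return to_frame.realize_frame(new_data)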
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None,
finite_difference_frameattr_name='obstime',
finite_difference_dt=1*u.second,
symmetric_finite_difference=True):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError('Frame attribute name {} is not a frame '
'attribute of {} or {}'.format(value,
self.fromsys,
self.tosys))
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import (CartesianRepresentation,
CartesianDifferential)
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt/2
from_diffless = fromcoord.realize_frame(fromcoord.data.without_differentials())
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (fromcoord_cart.xyz +
fromcoord_cart.differentials['s'].d_xyz*halfdt)
fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
backxyz = (fromcoord_cart.xyz -
fromcoord_cart.differentials['s'].d_xyz*halfdt)
back = supcall(fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe)
else:
fwdxyz = (fromcoord_cart.xyz +
fromcoord_cart.differentials['s'].d_xyz*dt)
fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(newdiff)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because ``AffineTransform`` and the matrix
transform classes share the ``_apply_transform()`` method, but have
different ``__call__()`` methods. ``StaticMatrixTransform`` passes in a
matrix stored as an instance attribute, and both of the matrix transforms pass
in ``None`` for the offset. Hence, user subclasses would likely want to
subclass this (rather than ``AffineTransform``) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (UnitSphericalRepresentation,
CartesianDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential)
data = fromcoord.data
has_velocity = 's' in data.differentials
# list of unit differentials
_unit_diffs = (SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential)
unit_vel_diff = (has_velocity and
isinstance(data.differentials['s'], _unit_diffs))
rad_vel_diff = (has_velocity and
isinstance(data.differentials['s'], RadialDifferential))
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError("Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {0})"
.format(data.__class__))
elif (has_velocity and (unit_vel_diff or rad_vel_diff) and
offset is not None and 's' in offset.differentials):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError("Velocity information stored on coordinate frame "
"is insufficient to do a full-space velocity "
"transformation (differential class: {0})"
.format(data.differentials['s'].__class__))
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError("Representation passed to AffineTransform contains"
" multiple associated differentials. Only a single"
" differential with velocity units is presently"
" supported (differentials: {0})."
.format(str(data.differentials)))
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (has_velocity and isinstance(data, UnitSphericalRepresentation) and
not unit_vel_diff and not rad_vel_diff):
# retrieve just velocity differential
unit_diff = data.differentials['s'].represent_as(
data.differentials['s']._unit_differential, data)
data = data.with_differentials({'s': unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials.
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = dict([(k, diff.represent_as(CartesianDifferential, data))
for k, diff in data.differentials.items()])
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = (rep.without_differentials() +
offset.without_differentials())
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials['s'] # already in Cartesian form
if offset is not None and 's' in offset.differentials:
veldiff = veldiff + offset.differentials['s']
newrep = newrep.with_differentials({'s': veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials['s']
_unit_cls = fromcoord.data.differentials['s']._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = dict([(comp, getattr(newdiff, comp))
for comp in newdiff.components])
kwargs['d_distance'] = fromcoord.data.differentials['s'].d_distance
diffs = {'s': fromcoord.data.differentials['s'].__class__(
copy=False, **kwargs)}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials['s'].represent_as(
fromcoord.data.differentials['s'].__class__, newrep)
diffs = {'s': newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials['s'].__class__
newrep = newrep.represent_as(fromcoord.data.__class__,
diff_cls._dimensional_differential)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials(
{'s': fromcoord.data.differentials['s']})
return newrep
class AffineTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a 3 x 3
cartesian transformation matrix and a tuple of displacement vectors.
See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
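Examples
--------
A schematic sketch of a pure velocity offset with no rotation (the
frame classes and offset values here are hypothetical, and the
representation classes are assumed imported from
``astropy.coordinates``)::

    def icrs_to_somelsr(icrs_coord, lsr_frame):
        offset = CartesianRepresentation(
            [0, 0, 0] * u.pc,
            differentials=CartesianDifferential([9, 12, 7] * u.km/u.s))
        return None, offset  # matrix=None skips the matrix step

    trans = AffineTransform(icrs_to_somelsr, ICRS, SomeLSR)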
"""
def __init__(self, transform_func, fromsys, tosys, priority=1,
register_graph=None):
if not callable(transform_func):
raise TypeError('transform_func is not callable')
self.transform_func = transform_func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
M, vec = self.transform_func(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, M, vec)
return toframe.realize_frame(newrep)
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError('Provided matrix is not 3 x 3')
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
newrep = self._apply_transform(fromcoord, self.matrix, None)
return toframe.realize_frame(newrep)
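# An illustrative sketch: because the matrix is fixed when the transform is
# defined, ``StaticMatrixTransform`` suits pure rotations between frames with
# no frame attributes. ``rotation_matrix`` is the helper from
# ``astropy.coordinates.matrix_utilities``; ``FrameA``/``FrameB`` remain
# hypothetical:
#
#     from astropy import units as u
#     from astropy.coordinates.matrix_utilities import rotation_matrix
#
#     # StaticMatrixTransform(rotation_matrix(30 * u.deg, 'z'),
#     #                       FrameA, FrameB,
#     #                       register_graph=frame_transform_graph)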
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1,
register_graph=None):
if not callable(matrix_func):
raise TypeError('matrix_func is not callable')
self.matrix_func = matrix_func
def _transform_func(fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
M = self.matrix_func(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, M, None)
return toframe.realize_frame(newrep)
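# An illustrative sketch: unlike the static case, the matrix here may depend
# on frame attributes and is therefore recomputed on every call. The matrix
# helper named below is hypothetical:
#
#     def _equinox_dependent_matrix(fromcoord, toframe):
#         # e.g. a precession matrix computed from ``toframe.equinox``
#         return _precession_matrix_for(toframe.equinox)  # hypothetical
#
#     # DynamicMatrixTransform(_equinox_dependent_matrix, FrameA, FrameB)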
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
transforms : sequence of `CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `StaticMatrixTransform` will be collapsed into a
single transformation to speed up the calculation.
"""
def __init__(self, transforms, fromsys, tosys, priority=1,
register_graph=None, collapse_static_mats=True):
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
Combines together sequences of `StaticMatrixTransform`s into a single
transform and returns it.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if (isinstance(lasttrans, StaticMatrixTransform) and
isinstance(currtrans, StaticMatrixTransform)):
combinedmat = matrix_product(currtrans.matrix, lasttrans.matrix)
newtrans[-1] = StaticMatrixTransform(combinedmat,
lasttrans.fromsys,
currtrans.tosys)
else:
newtrans.append(currtrans)
return newtrans
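    # A worked sanity check of the collapsing above: for consecutive static
    # transforms A -> B (matrix M1) and B -> C (matrix M2), applying them in
    # sequence gives M2 @ (M1 @ v) == (M2 @ M1) @ v, which is why
    # ``currtrans`` comes first in the ``matrix_product`` call:
    #
    #     import numpy as np
    #     M1, M2 = np.diag([1., 2., 3.]), np.diag([4., 5., 6.])
    #     v = np.ones(3)
    #     assert np.allclose(M2 @ (M1 @ v), (M2 @ M1) @ v)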
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
# build an intermediate frame with attributes taken from either
            # `toframe`, or if not there, `fromcoord`, or if not there, use
# the defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.get_frame_attr_names():
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
        # coordinate objects are immutable, so copying is not needed
return curr_coord
# map class names to colorblind-safe colors
trans_to_color = OrderedDict()
trans_to_color[AffineTransform] = '#555555' # gray
trans_to_color[FunctionTransform] = '#783001' # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = '#d95f02' # red-ish
trans_to_color[StaticMatrixTransform] = '#7570b3' # blue-ish
trans_to_color[DynamicMatrixTransform] = '#1b9e77' # green-ish
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the classes and utility functions for distance and
cartesian coordinates.
"""
import warnings
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
from .angles import Angle
__all__ = ['Distance']
__doctest_requires__ = {'*': ['scipy.integrate']}
class Distance(u.SpecificTypeQuantity):
"""
A one-dimensional distance.
    This can be initialized in one of five ways:
    * A distance ``value`` (array or float) and a ``unit``
    * A `~astropy.units.Quantity` object
    * A redshift and (optionally) a cosmology.
    * Providing a distance modulus
    * Providing a parallax
Parameters
----------
value : scalar or `~astropy.units.Quantity`.
The value of this distance.
unit : `~astropy.units.UnitBase`
The units for this distance, *if* ``value`` is not a
`~astropy.units.Quantity`. Must have dimensions of distance.
z : float
A redshift for this distance. It will be converted to a distance
by computing the luminosity distance for this redshift given the
cosmology specified by ``cosmology``. Must be given as a keyword
argument.
cosmology : ``Cosmology`` or `None`
A cosmology that will be used to compute the distance from ``z``.
If `None`, the current cosmology will be used (see
`astropy.cosmology` for details).
distmod : float or `~astropy.units.Quantity`
The distance modulus for this distance. Note that if ``unit`` is not
provided, a guess will be made at the unit between AU, pc, kpc, and Mpc.
parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The parallax in angular units.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
order : {'C', 'F', 'A'}, optional
See `~astropy.units.Quantity`.
subok : bool, optional
See `~astropy.units.Quantity`.
ndmin : int, optional
See `~astropy.units.Quantity`.
allow_negative : bool, optional
        Whether to allow negative distances (which are possible in some
cosmologies). Default: ``False``.
Raises
------
`~astropy.units.UnitsError`
If the ``unit`` is not a distance.
ValueError
If value specified is less than 0 and ``allow_negative=False``.
If ``z`` is provided with a ``unit`` or ``cosmology`` is provided
when ``z`` is *not* given, or ``value`` is given as well as ``z``.
Examples
--------
>>> from astropy import units as u
    >>> from astropy.cosmology import WMAP5
>>> d1 = Distance(10, u.Mpc)
>>> d2 = Distance(40, unit=u.au)
>>> d3 = Distance(value=5, unit=u.kpc)
>>> d4 = Distance(z=0.23)
>>> d5 = Distance(z=0.23, cosmology=WMAP5)
>>> d6 = Distance(distmod=24.47)
>>> d7 = Distance(Distance(10 * u.Mpc))
>>> d8 = Distance(parallax=21.34*u.mas)
"""
_equivalent_unit = u.m
_include_easy_conversion_members = True
def __new__(cls, value=None, unit=None, z=None, cosmology=None,
distmod=None, parallax=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0, allow_negative=False):
if z is not None:
if value is not None or distmod is not None:
                raise ValueError('Should give only one of `value`, `z` '
'or `distmod` in Distance constructor.')
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
value = cosmology.luminosity_distance(z)
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
else:
if cosmology is not None:
raise ValueError('A `cosmology` was given but `z` was not '
'provided in Distance constructor')
            value_msg = ('Should give only one of `value`, `z`, `distmod`, or '
'`parallax` in Distance constructor.')
n_not_none = np.sum([x is not None
for x in [value, z, distmod, parallax]])
if n_not_none > 1:
raise ValueError(value_msg)
if distmod is not None:
value = cls._distmod_to_pc(distmod)
if unit is None:
# if the unit is not specified, guess based on the mean of
# the log of the distance
meanlogval = np.log10(value.value).mean()
if meanlogval > 6:
unit = u.Mpc
elif meanlogval > 3:
unit = u.kpc
elif meanlogval < -3: # ~200 AU
unit = u.AU
else:
unit = u.pc
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
elif parallax is not None:
value = parallax.to_value(u.pc, equivalencies=u.parallax())
unit = u.pc
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
if np.any(parallax < 0):
if allow_negative:
warnings.warn(
"Negative parallaxes are converted to NaN "
"distances even when `allow_negative=True`, "
"because negative parallaxes cannot be transformed "
"into distances. See discussion in this paper: "
"https://arxiv.org/abs/1507.02105", AstropyWarning)
else:
raise ValueError("Some parallaxes are negative, which "
"are notinterpretable as distances. "
"See the discussion in this paper: "
"https://arxiv.org/abs/1507.02105 . "
"If you want parallaxes to pass "
"through, with negative parallaxes "
"instead becoming NaN, use the "
"`allow_negative=True` argument.")
elif value is None:
raise ValueError('None of `value`, `z`, `distmod`, or '
'`parallax` were given to Distance '
'constructor')
# now we have arguments like for a Quantity, so let it do the work
distance = super().__new__(
cls, value, unit, dtype=dtype, copy=copy, order=order,
subok=subok, ndmin=ndmin)
if not allow_negative and np.any(distance.value < 0):
raise ValueError("Distance must be >= 0. Use the argument "
"'allow_negative=True' to allow negative values.")
return distance
@property
def z(self):
"""Short for ``self.compute_z()``"""
return self.compute_z()
def compute_z(self, cosmology=None):
"""
The redshift for this distance assuming its physical distance is
a luminosity distance.
Parameters
----------
cosmology : ``Cosmology`` or `None`
The cosmology to assume for this calculation, or `None` to use the
current cosmology (see `astropy.cosmology` for details).
Returns
-------
z : float
The redshift of this distance given the provided ``cosmology``.
"""
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
from astropy.cosmology import z_at_value
return z_at_value(cosmology.luminosity_distance, self, ztol=1.e-10)
@property
def distmod(self):
"""The distance modulus as a `~astropy.units.Quantity`"""
val = 5. * np.log10(self.to_value(u.pc)) - 5.
return u.Quantity(val, u.mag, copy=False)
@classmethod
def _distmod_to_pc(cls, dm):
dm = u.Quantity(dm, u.mag)
return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False)
@property
def parallax(self):
"""The parallax angle as an `~astropy.coordinates.Angle` object"""
return Angle(self.to(u.milliarcsecond, u.parallax()))
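# An illustrative sketch of the equivalences implemented above: a distance
# modulus ``dm`` maps to ``10**((dm + 5) / 5)`` pc, and a parallax maps to
# ``1 / parallax[arcsec]`` pc through the ``u.parallax()`` equivalency:
#
#     from astropy import units as u
#     d1 = Distance(distmod=10)           # 10**(15/5) pc = 1000 pc
#     d2 = Distance(parallax=10 * u.mas)  # 1 / 0.010 arcsec -> 100 pc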
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for coordinate-related functionality.
This is generally just wrapping around the object-oriented coordinates
framework, but it is useful for some users who are used to more functional
interfaces.
"""
import warnings
from collections.abc import Sequence
import numpy as np
from astropy import units as u
from astropy.constants import c
from astropy import _erfa as erfa
from astropy.io import ascii
from astropy.utils import isiterable, data
from .sky_coordinate import SkyCoord
from .builtin_frames import GCRS, PrecessedGeocentric
from .representation import SphericalRepresentation, CartesianRepresentation
from .builtin_frames.utils import get_jd12
__all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun',
'get_constellation', 'concatenate_representations', 'concatenate']
def cartesian_to_spherical(x, y, z):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
    Note that the resulting angles are in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This function simply wraps functionality provided by the
`~astropy.coordinates.CartesianRepresentation` and
`~astropy.coordinates.SphericalRepresentation` classes. In general,
for both performance and readability, we suggest using these classes
directly. But for situations where a quick one-off conversion makes
sense, this function is provided.
Parameters
----------
x : scalar, array-like, or `~astropy.units.Quantity`
The first cartesian coordinate.
y : scalar, array-like, or `~astropy.units.Quantity`
The second cartesian coordinate.
z : scalar, array-like, or `~astropy.units.Quantity`
The third cartesian coordinate.
Returns
-------
r : `~astropy.units.Quantity`
The radial coordinate (in the same units as the inputs).
lat : `~astropy.units.Quantity`
The latitude in radians
lon : `~astropy.units.Quantity`
The longitude in radians
"""
if not hasattr(x, 'unit'):
x = x * u.dimensionless_unscaled
if not hasattr(y, 'unit'):
y = y * u.dimensionless_unscaled
if not hasattr(z, 'unit'):
z = z * u.dimensionless_unscaled
cart = CartesianRepresentation(x, y, z)
sph = cart.represent_as(SphericalRepresentation)
return sph.distance, sph.lat, sph.lon
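# A quick usage sketch (bare numbers are treated as dimensionless, and the
# values are returned in the order r, lat, lon):
#
#     r, lat, lon = cartesian_to_spherical(0., 1., 0.)
#     # r = 1 (dimensionless), lat = 0 rad, lon = pi/2 rad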
def spherical_to_cartesian(r, lat, lon):
"""
Converts spherical polar coordinates to rectangular cartesian
coordinates.
Note that the input angles should be in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This is a low-level function used internally in
`astropy.coordinates`. It is provided for users if they really
want to use it, but it is recommended that you use the
`astropy.coordinates` coordinate systems.
Parameters
----------
r : scalar, array-like, or `~astropy.units.Quantity`
        The radial coordinate (the returned cartesian values share its units).
lat : scalar, array-like, or `~astropy.units.Quantity`
The latitude (in radians if array or scalar)
lon : scalar, array-like, or `~astropy.units.Quantity`
The longitude (in radians if array or scalar)
Returns
-------
x : float or array
The first cartesian coordinate.
y : float or array
The second cartesian coordinate.
z : float or array
The third cartesian coordinate.
"""
if not hasattr(r, 'unit'):
r = r * u.dimensionless_unscaled
if not hasattr(lat, 'unit'):
lat = lat * u.radian
if not hasattr(lon, 'unit'):
lon = lon * u.radian
sph = SphericalRepresentation(distance=r, lat=lat, lon=lon)
cart = sph.represent_as(CartesianRepresentation)
return cart.x, cart.y, cart.z
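# A quick usage sketch inverting the example above (bare angles are
# interpreted as radians):
#
#     import numpy as np
#     x, y, z = spherical_to_cartesian(1., 0., np.pi / 2)
#     # x ~ 0, y ~ 1, z ~ 0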
def get_sun(time):
"""
Determines the location of the sun at a given time (or times, if the input
is an array `~astropy.time.Time` object), in geocentric coordinates.
Parameters
----------
time : `~astropy.time.Time`
The time(s) at which to compute the location of the sun.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord`
The location of the sun as a `~astropy.coordinates.SkyCoord` in the
`~astropy.coordinates.GCRS` frame.
Notes
-----
The algorithm for determining the sun/earth relative position is based
on the simplified version of VSOP2000 that is part of ERFA. Compared to
JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
    250 km over the 1000-3000 span.
"""
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb'))
# We have to manually do aberration because we're outputting directly into
# GCRS
earth_p = earth_pv_helio['p']
earth_v = earth_pv_bary['v']
# convert barycentric velocity to units of c, but keep as array for passing in to erfa
earth_v /= c.to_value(u.au/u.d)
dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5
properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)),
-earth_v, dsun, invlorentz)
cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU,
y=-dsun*properdir[..., 1] * u.AU,
z=-dsun*properdir[..., 2] * u.AU)
return SkyCoord(cartrep, frame=GCRS(obstime=time))
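# A usage sketch:
#
#     from astropy.time import Time
#     sun = get_sun(Time('2020-01-01'))  # a SkyCoord in GCRS at that obstime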
# global dictionary that caches repeatedly-needed info for get_constellation
_constellation_data = {}
def get_constellation(coord, short_name=False, constellation_list='iau'):
"""
Determines the constellation(s) a given coordinate object contains.
Parameters
----------
coord : coordinate object
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If ``coords`` contains a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
"""
if constellation_list != 'iau':
raise ValueError("only 'iau' us currently supported for constellation_list")
# read the data files and cache them if they haven't been already
if not _constellation_data:
cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat')
ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name'])
cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8')
cnames_short_to_long = dict([(l[:3], l[4:])
for l in cnames.split('\n')
if not l.startswith('#')])
cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']])
_constellation_data['ctable'] = ctable
_constellation_data['cnames_long'] = cnames_long
else:
ctable = _constellation_data['ctable']
cnames_long = _constellation_data['cnames_long']
isscalar = coord.isscalar
# if it is geocentric, we reproduce the frame but with the 1875 equinox,
# which is where the constellations are defined
# this yields a "dubious year" warning because ERFA considers the year 1875
# "dubious", probably because UTC isn't well-defined then and precession
# models aren't precisely calibrated back to then. But it's plenty
# sufficient for constellations
with warnings.catch_warnings():
warnings.simplefilter('ignore', erfa.ErfaWarning)
constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875'))
if isscalar:
rah = constel_coord.ra.ravel().hour
decd = constel_coord.dec.ravel().deg
else:
rah = constel_coord.ra.hour
decd = constel_coord.dec.deg
constellidx = -np.ones(len(rah), dtype=int)
notided = constellidx == -1 # should be all
for i, row in enumerate(ctable):
msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl'])
constellidx[notided & msk] = i
notided = constellidx == -1
if np.sum(notided) == 0:
break
else:
raise ValueError('Could not find constellation for coordinates {0}'.format(constel_coord[notided]))
if short_name:
names = ctable['name'][constellidx]
else:
names = cnames_long[constellidx]
if isscalar:
return names[0]
else:
return names
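# A usage sketch (the coordinate below is roughly M31, which lies in
# Andromeda):
#
#     from astropy import units as u
#     get_constellation(SkyCoord(10.68 * u.deg, 41.27 * u.deg))
#     # -> 'Andromeda' (or 'And' with short_name=True)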
def _concatenate_components(reps_difs, names):
""" Helper function for the concatenate function below. Gets and
concatenates all of the individual components for an iterable of
representations or differentials.
"""
values = []
for name in names:
data_vals = []
for x in reps_difs:
data_val = getattr(x, name)
data_vals.append(data_val.reshape(1, ) if x.isscalar else data_val)
concat_vals = np.concatenate(data_vals)
# Hack because np.concatenate doesn't fully work with Quantity
if isinstance(concat_vals, u.Quantity):
concat_vals._unit = data_val.unit
values.append(concat_vals)
return values
def concatenate_representations(reps):
"""
Combine multiple representation objects into a single instance by
concatenating the data in each component.
Currently, all of the input representations have to be the same type. This
properly handles differential or velocity data, but all input objects must
have the same differential object type as well.
Parameters
----------
reps : sequence of representation objects
The objects to concatenate
Returns
-------
rep : `~astropy.coordinates.BaseRepresentation` subclass
A single representation object with its data set to the concatenation of
all the elements of the input sequence of representations.
"""
if not isinstance(reps, (Sequence, np.ndarray)):
raise TypeError('Input must be a list or iterable of representation '
'objects.')
    # First, validate that the representations are the same, and
# concatenate all of the positional data:
rep_type = type(reps[0])
if any(type(r) != rep_type for r in reps):
raise TypeError('Input representations must all have the same type.')
# Construct the new representation with the concatenated data from the
# representations passed in
values = _concatenate_components(reps,
rep_type.attr_classes.keys())
new_rep = rep_type(*values)
has_diff = any('s' in rep.differentials for rep in reps)
if has_diff and any('s' not in rep.differentials for rep in reps):
raise ValueError('Input representations must either all contain '
'differentials, or not contain differentials.')
if has_diff:
dif_type = type(reps[0].differentials['s'])
if any('s' not in r.differentials or
type(r.differentials['s']) != dif_type
for r in reps):
raise TypeError('All input representations must have the same '
'differential type.')
values = _concatenate_components([r.differentials['s'] for r in reps],
dif_type.attr_classes.keys())
new_dif = dif_type(*values)
new_rep = new_rep.with_differentials({'s': new_dif})
return new_rep
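# A usage sketch:
#
#     from astropy import units as u
#     from astropy.coordinates import CartesianRepresentation
#     reps = [CartesianRepresentation([1, 2, 3] * u.pc),
#             CartesianRepresentation([4, 5, 6] * u.pc)]
#     combined = concatenate_representations(reps)  # a shape-(2,) representation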
def concatenate(coords):
"""
Combine multiple coordinate objects into a single
`~astropy.coordinates.SkyCoord`.
"Coordinate objects" here mean frame objects with data,
`~astropy.coordinates.SkyCoord`, or representation objects. Currently,
they must all be in the same frame, but in a future version this may be
    relaxed to allow inhomogeneous sequences of objects.
Parameters
----------
coords : sequence of coordinate objects
The objects to concatenate
Returns
-------
cskycoord : SkyCoord
A single sky coordinate with its data set to the concatenation of all
the elements in ``coords``
"""
if getattr(coords, 'isscalar', False) or not isiterable(coords):
raise TypeError('The argument to concatenate must be iterable')
scs = [SkyCoord(coord, copy=False) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError("All inputs must have equivalent frames: "
"{0} != {1}".format(sc, scs[0]))
# TODO: this can be changed to SkyCoord.from_representation() for a speed
# boost when we switch to using classmethods
return SkyCoord(concatenate_representations([c.data for c in coords]),
frame=scs[0].frame)
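# A usage sketch:
#
#     from astropy import units as u
#     c = concatenate([SkyCoord(1 * u.deg, 2 * u.deg),
#                      SkyCoord(3 * u.deg, 4 * u.deg)])  # a length-2 SkyCoord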
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from collections.abc import Sequence
import inspect
import numpy as np
from astropy.units import Unit, IrreducibleUnit
from astropy import units as u
from .baseframe import (BaseCoordinateFrame, frame_transform_graph,
_get_repr_cls, _get_diff_cls,
_normalize_representation_type)
from .builtin_frames import ICRS
from .representation import (BaseRepresentation, SphericalRepresentation,
UnitSphericalRepresentation)
"""
This module contains utility functions to make the SkyCoord initializer more modular
and maintainable. None of the functionality here is part of the public API; it
is only used as part of creating SkyCoord objects.
"""
PLUS_MINUS_RE = re.compile(r'(\+|\-)')
J_PREFIXED_RA_DEC_RE = re.compile(
r"""J # J prefix
([0-9]{6,7}\.?[0-9]{0,2}) # RA as HHMMSS.ss or DDDMMSS.ss, optional decimal digits
([\+\-][0-9]{6}\.?[0-9]{0,2})\s*$ # Dec as DDMMSS.ss, optional decimal digits
""", re.VERBOSE)
def _get_frame_class(frame):
"""
Get a frame class from the input `frame`, which could be a frame name
string, or frame class.
"""
if isinstance(frame, str):
frame_names = frame_transform_graph.get_names()
if frame not in frame_names:
raise ValueError('Coordinate frame name "{0}" is not a known '
'coordinate frame ({1})'
.format(frame, sorted(frame_names)))
frame_cls = frame_transform_graph.lookup_name(frame)
elif inspect.isclass(frame) and issubclass(frame, BaseCoordinateFrame):
frame_cls = frame
else:
raise ValueError("Coordinate frame must be a frame name or frame "
"class, not a '{0}'".format(frame.__class__.__name__))
return frame_cls
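# A worked sketch of the two accepted input forms:
#
#     _get_frame_class('icrs')  # -> ICRS (looked up by name in the graph)
#     _get_frame_class(ICRS)    # -> ICRS (already a frame class)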
_conflict_err_msg = ("Coordinate attribute '{0}'={1!r} conflicts with keyword "
"argument '{0}'={2!r}. This usually means an attribute "
"was set on one of the input objects and also in the "
"keyword arguments to {3}")
def _get_frame_without_data(args, kwargs):
"""
Determines the coordinate frame from input SkyCoord args and kwargs.
This function extracts (removes) all frame attributes from the kwargs and
determines the frame class either using the kwargs, or using the first
element in the args (if a single frame object is passed in, for example).
This function allows a frame to be specified as a string like 'icrs' or a
frame class like ICRS, or an instance ICRS(), as long as the instance frame
attributes don't conflict with kwargs passed in (which could require a
three-way merge with the coordinate data possibly specified via the args).
"""
from .sky_coordinate import SkyCoord
# We eventually (hopefully) fill and return these by extracting the frame
# and frame attributes from the input:
frame_cls = None
frame_cls_kwargs = {}
# The first place to check: the frame could be specified explicitly
frame = kwargs.pop('frame', None)
if frame is not None:
# Here the frame was explicitly passed in as a keyword argument.
# If the frame is an instance or SkyCoord, we extract the attributes
# and split the instance into the frame class and an attributes dict
if isinstance(frame, SkyCoord):
# If the frame was passed as a SkyCoord, we also want to preserve
# any extra attributes (e.g., obstime) if they are not already
# specified in the kwargs. We preserve these extra attributes by
# adding them to the kwargs dict:
for attr in frame._extra_frameattr_names:
if (attr in kwargs and
np.any(getattr(frame, attr) != kwargs[attr])):
# This SkyCoord attribute passed in with the frame= object
# conflicts with an attribute passed in directly to the
# SkyCoord initializer as a kwarg:
raise ValueError(_conflict_err_msg
.format(attr, getattr(frame, attr),
kwargs[attr], 'SkyCoord'))
else:
kwargs[attr] = getattr(frame, attr)
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
# Extract any frame attributes
for attr in frame.get_frame_attr_names():
# If the frame was specified as an instance, we have to make
# sure that no frame attributes were specified as kwargs - this
# would require a potential three-way merge:
if attr in kwargs:
raise ValueError("Cannot specify frame attribute '{0}' "
"directly as an argument to SkyCoord "
"because a frame instance was passed in. "
"Either pass a frame class, or modify the "
"frame attributes of the input frame "
"instance.".format(attr))
elif not frame.is_frame_attr_default(attr):
kwargs[attr] = getattr(frame, attr)
frame_cls = frame.__class__
# Make sure we propagate representation/differential _type choices,
# unless these are specified directly in the kwargs:
kwargs.setdefault('representation_type', frame.representation_type)
kwargs.setdefault('differential_type', frame.differential_type)
if frame_cls is None: # frame probably a string
frame_cls = _get_frame_class(frame)
# Check that the new frame doesn't conflict with existing coordinate frame
# if a coordinate is supplied in the args list. If the frame still had not
# been set by this point and a coordinate was supplied, then use that frame.
for arg in args:
# this catches the "single list passed in" case. For that case we want
# to allow the first argument to set the class. That's OK because
# _parse_coordinate_arg goes and checks that the frames match between
# the first and all the others
if (isinstance(arg, (Sequence, np.ndarray)) and
len(args) == 1 and len(arg) > 0):
arg = arg[0]
coord_frame_obj = coord_frame_cls = None
if isinstance(arg, BaseCoordinateFrame):
coord_frame_obj = arg
elif isinstance(arg, SkyCoord):
coord_frame_obj = arg.frame
if coord_frame_obj is not None:
coord_frame_cls = coord_frame_obj.__class__
frame_diff = coord_frame_obj.get_representation_cls('s')
if frame_diff is not None:
# we do this check because otherwise if there's no default
                # differential (i.e. it is None), the code below chokes, but
# None still gets through if the user *requests* it
kwargs.setdefault('differential_type', frame_diff)
for attr in coord_frame_obj.get_frame_attr_names():
if (attr in kwargs and
not coord_frame_obj.is_frame_attr_default(attr) and
np.any(kwargs[attr] != getattr(coord_frame_obj, attr))):
raise ValueError("Frame attribute '{0}' has conflicting "
"values between the input coordinate data "
"and either keyword arguments or the "
"frame specification (frame=...): "
"{1} =/= {2}"
.format(attr,
getattr(coord_frame_obj, attr),
kwargs[attr]))
elif (attr not in kwargs and
not coord_frame_obj.is_frame_attr_default(attr)):
kwargs[attr] = getattr(coord_frame_obj, attr)
if coord_frame_cls is not None:
if frame_cls is None:
frame_cls = coord_frame_cls
elif frame_cls is not coord_frame_cls:
raise ValueError("Cannot override frame='{0}' of input "
"coordinate with new frame='{1}'. Instead, "
"transform the coordinate."
.format(coord_frame_cls.__name__,
frame_cls.__name__))
if frame_cls is None:
frame_cls = ICRS
# By now, frame_cls should be set - if it's not, something went wrong
if not issubclass(frame_cls, BaseCoordinateFrame):
# We should hopefully never get here...
raise ValueError('Frame class has unexpected type: {0}'
.format(frame_cls.__name__))
for attr in frame_cls.frame_attributes:
if attr in kwargs:
frame_cls_kwargs[attr] = kwargs.pop(attr)
# TODO: deprecate representation, remove this in future
_normalize_representation_type(kwargs)
if 'representation_type' in kwargs:
frame_cls_kwargs['representation_type'] = _get_repr_cls(
kwargs.pop('representation_type'))
differential_type = kwargs.pop('differential_type', None)
if differential_type is not None:
frame_cls_kwargs['differential_type'] = _get_diff_cls(
differential_type)
return frame_cls, frame_cls_kwargs
def _parse_coordinate_data(frame, args, kwargs):
"""
Extract coordinate data from the args and kwargs passed to SkyCoord.
By this point, we assume that all of the frame attributes have been
extracted from kwargs (see _get_frame_without_data()), so all that are left
are (1) extra SkyCoord attributes, and (2) the coordinate data, specified in
any of the valid ways.
"""
valid_skycoord_kwargs = {}
valid_components = {}
info = None
# Look through the remaining kwargs to see if any are valid attribute names
# by asking the frame transform graph:
attr_names = list(kwargs.keys())
for attr in attr_names:
if attr in frame_transform_graph.frame_attributes:
valid_skycoord_kwargs[attr] = kwargs.pop(attr)
# By this point in parsing the arguments, anything left in the args and
# kwargs should be data. Either as individual components, or a list of
# objects, or a representation, etc.
# Get units of components
units = _get_representation_component_units(args, kwargs)
# Grab any frame-specific attr names like `ra` or `l` or `distance` from
# kwargs and move them to valid_components.
valid_components.update(_get_representation_attrs(frame, units, kwargs))
# Error if anything is still left in kwargs
if kwargs:
# The next few lines add a more user-friendly error message to a
# common and confusing situation when the user specifies, e.g.,
# `pm_ra` when they really should be passing `pm_ra_cosdec`. The
# extra error should only turn on when the positional representation
# is spherical, and when the component 'pm_<lon>' is passed.
pm_message = ''
if frame.representation_type == SphericalRepresentation:
frame_names = list(frame.get_representation_component_names().keys())
lon_name = frame_names[0]
lat_name = frame_names[1]
if 'pm_{0}'.format(lon_name) in list(kwargs.keys()):
pm_message = ('\n\n By default, most frame classes expect '
'the longitudinal proper motion to include '
'the cos(latitude) term, named '
'`pm_{0}_cos{1}`. Did you mean to pass in '
'this component?'
.format(lon_name, lat_name))
raise ValueError('Unrecognized keyword argument(s) {0}{1}'
.format(', '.join("'{0}'".format(key)
for key in kwargs),
pm_message))
# Finally deal with the unnamed args. This figures out what the arg[0]
# is and returns a dict with appropriate key/values for initializing
# frame class. Note that differentials are *never* valid args, only
# kwargs. So they are not accounted for here (unless they're in a frame
# or SkyCoord object)
if args:
if len(args) == 1:
# One arg which must be a coordinate. In this case coord_kwargs
# will contain keys like 'ra', 'dec', 'distance' along with any
# frame attributes like equinox or obstime which were explicitly
# specified in the coordinate object (i.e. non-default).
_skycoord_kwargs, _components = _parse_coordinate_arg(
args[0], frame, units, kwargs)
# Copy other 'info' attr only if it has actually been defined.
if 'info' in getattr(args[0], '__dict__', ()):
info = args[0].info
elif len(args) <= 3:
_skycoord_kwargs = {}
_components = {}
frame_attr_names = frame.representation_component_names.keys()
repr_attr_names = frame.representation_component_names.values()
for arg, frame_attr_name, repr_attr_name, unit in zip(args, frame_attr_names,
repr_attr_names, units):
attr_class = frame.representation_type.attr_classes[repr_attr_name]
_components[frame_attr_name] = attr_class(arg, unit=unit)
else:
raise ValueError('Must supply no more than three positional arguments, got {}'
.format(len(args)))
# The next two loops copy the component and skycoord attribute data into
# their final, respective "valid_" dictionaries. For each, we check that
# there are no relevant conflicts with values specified by the user
# through other means:
# First validate the component data
for attr, coord_value in _components.items():
if attr in valid_components:
raise ValueError(_conflict_err_msg
.format(attr, coord_value,
valid_components[attr], 'SkyCoord'))
valid_components[attr] = coord_value
# Now validate the custom SkyCoord attributes
for attr, value in _skycoord_kwargs.items():
if (attr in valid_skycoord_kwargs and
np.any(valid_skycoord_kwargs[attr] != value)):
raise ValueError(_conflict_err_msg
.format(attr, value,
valid_skycoord_kwargs[attr],
'SkyCoord'))
valid_skycoord_kwargs[attr] = value
return valid_skycoord_kwargs, valid_components, info
def _get_representation_component_units(args, kwargs):
"""
Get the unit from kwargs for the *representation* components (not the
differentials).
"""
if 'unit' not in kwargs:
units = [None, None, None]
else:
units = kwargs.pop('unit')
if isinstance(units, str):
units = [x.strip() for x in units.split(',')]
# Allow for input like unit='deg' or unit='m'
if len(units) == 1:
units = [units[0], units[0], units[0]]
elif isinstance(units, (Unit, IrreducibleUnit)):
units = [units, units, units]
try:
units = [(Unit(x) if x else None) for x in units]
units.extend(None for x in range(3 - len(units)))
if len(units) > 3:
raise ValueError()
except Exception:
raise ValueError('Unit keyword must have one to three unit values as '
'tuple or comma-separated string')
return units
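# A worked sketch of the accepted ``unit`` keyword forms:
#
#     _get_representation_component_units((), {})
#     #   -> [None, None, None]
#     _get_representation_component_units((), {'unit': 'deg'})
#     #   -> [Unit('deg'), Unit('deg'), Unit('deg')]
#     _get_representation_component_units((), {'unit': 'deg,deg,pc'})
#     #   -> [Unit('deg'), Unit('deg'), Unit('pc')]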
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
"""
Single unnamed arg supplied. This must be:
- Coordinate frame with data
- Representation
- SkyCoord
- List or tuple of:
- String which splits into two values
- Iterable with two values
- SkyCoord, frame, or representation objects.
Returns a dict mapping coordinate attribute names to values (or lists of
values)
"""
from .sky_coordinate import SkyCoord
is_scalar = False # Differentiate between scalar and list input
# valid_kwargs = {} # Returned dict of lon, lat, and distance (optional)
components = {}
skycoord_kwargs = {}
frame_attr_names = list(frame.representation_component_names.keys())
repr_attr_names = list(frame.representation_component_names.values())
repr_attr_classes = list(frame.representation_type.attr_classes.values())
n_attr_names = len(repr_attr_names)
# Turn a single string into a list of strings for convenience
if isinstance(coords, str):
is_scalar = True
coords = [coords]
if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
# Note that during parsing of `frame` it is checked that any coordinate
# args have the same frame as explicitly supplied, so don't worry here.
if not coords.has_data:
raise ValueError('Cannot initialize from a frame without coordinate data')
data = coords.data.represent_as(frame.representation_type)
values = [] # List of values corresponding to representation attrs
repr_attr_name_to_drop = []
for repr_attr_name in repr_attr_names:
# If coords did not have an explicit distance then don't include in initializers.
if (isinstance(coords.data, UnitSphericalRepresentation) and
repr_attr_name == 'distance'):
repr_attr_name_to_drop.append(repr_attr_name)
continue
# Get the value from `data` in the eventual representation
values.append(getattr(data, repr_attr_name))
# drop the ones that were skipped because they were distances
for nametodrop in repr_attr_name_to_drop:
nameidx = repr_attr_names.index(nametodrop)
del repr_attr_names[nameidx]
del units[nameidx]
del frame_attr_names[nameidx]
del repr_attr_classes[nameidx]
if coords.data.differentials and 's' in coords.data.differentials:
orig_vel = coords.data.differentials['s']
vel = coords.data.represent_as(frame.representation_type, frame.get_representation_cls('s')).differentials['s']
for frname, reprname in frame.get_representation_component_names('s').items():
if (reprname == 'd_distance' and not hasattr(orig_vel, reprname) and
'unit' in orig_vel.get_name()):
continue
values.append(getattr(vel, reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(vel.attr_classes[reprname])
for attr in frame_transform_graph.frame_attributes:
value = getattr(coords, attr, None)
use_value = (isinstance(coords, SkyCoord)
or attr not in coords._attr_names_with_defaults)
if use_value and value is not None:
skycoord_kwargs[attr] = value
elif isinstance(coords, BaseRepresentation):
if coords.differentials and 's' in coords.differentials:
diffs = frame.get_representation_cls('s')
data = coords.represent_as(frame.representation_type, diffs)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
for frname, reprname in frame.get_representation_component_names('s').items():
values.append(getattr(data.differentials['s'], reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(data.differentials['s'].attr_classes[reprname])
else:
data = coords.represent_as(frame.representation_type)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if'
and coords.ndim == 2 and coords.shape[1] <= 3):
# 2-d array of coordinate values. Handle specially for efficiency.
values = coords.transpose() # Iterates over repr attrs
elif isinstance(coords, (Sequence, np.ndarray)):
# Handles list-like input.
vals = []
is_ra_dec_representation = ('ra' in frame.representation_component_names and
'dec' in frame.representation_component_names)
coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
if any(isinstance(coord, coord_types) for coord in coords):
# this parsing path is used when there are coordinate-like objects
# in the list - instead of creating lists of values, we create
# SkyCoords from the list elements and then combine them.
scs = [SkyCoord(coord, **init_kwargs) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError("List of inputs don't have equivalent "
"frames: {0} != {1}".format(sc, scs[0]))
# Now use the first to determine if they are all UnitSpherical
allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)
# get the frame attributes from the first coord in the list, because
# from the above we know it matches all the others. First copy over
# the attributes that are in the frame itself, then copy over any
# extras in the SkyCoord
for fattrnm in scs[0].frame.frame_attributes:
skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
for fattrnm in scs[0]._extra_frameattr_names:
skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm)
# Now combine the values, to be used below
values = []
for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names):
if allunitsphrepr and repr_attr_name == 'distance':
# if they are *all* UnitSpherical, don't give a distance
continue
data_vals = []
for sc in scs:
data_val = getattr(sc, data_attr_name)
data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val)
concat_vals = np.concatenate(data_vals)
# Hack because np.concatenate doesn't fully work with Quantity
if isinstance(concat_vals, u.Quantity):
concat_vals._unit = data_val.unit
values.append(concat_vals)
else:
# none of the elements are "frame-like"
# turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
for coord in coords:
if isinstance(coord, str):
coord1 = coord.split()
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif is_ra_dec_representation:
coord = _parse_ra_dec(coord)
else:
coord = coord1
vals.append(coord) # Assumes coord is a sequence at this point
# Do some basic validation of the list elements: all have a length and all
# lengths the same
try:
n_coords = sorted(set(len(x) for x in vals))
except Exception:
                raise ValueError('One or more elements of the input sequence do not have a length')
if len(n_coords) > 1:
raise ValueError('Input coordinate values must have same number of elements, found {0}'
.format(n_coords))
n_coords = n_coords[0]
# Must have no more coord inputs than representation attributes
if n_coords > n_attr_names:
raise ValueError('Input coordinates have {0} values but '
'representation {1} only accepts {2}'
.format(n_coords,
frame.representation_type.get_name(),
n_attr_names))
# Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
            # (ok since we know it is exactly rectangular). (Note: can't just use zip(*values)
            # because Longitude et al distinguish list from tuple, so [a1, a2, ..] is needed
            # while (a1, a2, ..) doesn't work.)
values = [list(x) for x in zip(*vals)]
if is_scalar:
values = [x[0] for x in values]
else:
raise ValueError('Cannot parse coordinates from first argument')
# Finally we have a list of values from which to create the keyword args
# for the frame initialization. Validate by running through the appropriate
# class initializer and supply units (which might be None).
try:
for frame_attr_name, repr_attr_class, value, unit in zip(
frame_attr_names, repr_attr_classes, values, units):
components[frame_attr_name] = repr_attr_class(value, unit=unit,
copy=False)
except Exception as err:
raise ValueError('Cannot parse first argument data "{0}" for attribute '
'{1}'.format(value, frame_attr_name), err)
return skycoord_kwargs, components
def _get_representation_attrs(frame, units, kwargs):
"""
Find instances of the "representation attributes" for specifying data
for this frame. Pop them off of kwargs, run through the appropriate class
constructor (to validate and apply unit), and put into the output
valid_kwargs. "Representation attributes" are the frame-specific aliases
for the underlying data values in the representation, e.g. "ra" for "lon"
for many equatorial spherical representations, or "w" for "x" in the
cartesian representation of Galactic.
This also gets any *differential* kwargs, because they go into the same
frame initializer later on.
"""
frame_attr_names = frame.representation_component_names.keys()
repr_attr_classes = frame.representation_type.attr_classes.values()
valid_kwargs = {}
for frame_attr_name, repr_attr_class, unit in zip(frame_attr_names, repr_attr_classes, units):
value = kwargs.pop(frame_attr_name, None)
if value is not None:
valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit)
# also check the differentials. They aren't included in the units keyword,
# so we only look for the names.
differential_type = frame.differential_type
if differential_type is not None:
for frame_name, repr_name in frame.get_representation_component_names('s').items():
diff_attr_class = differential_type.attr_classes[repr_name]
value = kwargs.pop(frame_name, None)
if value is not None:
valid_kwargs[frame_name] = diff_attr_class(value)
return valid_kwargs
def _parse_ra_dec(coord_str):
"""
Parse RA and Dec values from a coordinate string. Currently the
following formats are supported:
* space separated 6-value format
* space separated <6-value format, this requires a plus or minus sign
separation between RA and Dec
* sign separated format
* JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
* JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
Parameters
----------
coord_str : str
Coordinate string to parse.
Returns
-------
coord : str or list of str
Parsed coordinate values.
"""
if isinstance(coord_str, str):
coord1 = coord_str.split()
else:
# This exception should never be raised from SkyCoord
raise TypeError('coord_str must be a single str')
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif len(coord1) > 2:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], ' '.join(coord[1:]))
elif len(coord1) == 1:
match_j = J_PREFIXED_RA_DEC_RE.match(coord_str)
if match_j:
coord = match_j.groups()
if len(coord[0].split('.')[0]) == 7:
coord = ('{0} {1} {2}'.
format(coord[0][0:3], coord[0][3:5], coord[0][5:]),
'{0} {1} {2}'.
format(coord[1][0:3], coord[1][3:5], coord[1][5:]))
else:
coord = ('{0} {1} {2}'.
format(coord[0][0:2], coord[0][2:4], coord[0][4:]),
'{0} {1} {2}'.
format(coord[1][0:3], coord[1][3:5], coord[1][5:]))
else:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], ' '.join(coord[1:]))
else:
coord = coord1
return coord
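# A worked sketch of the branches above:
#
#     _parse_ra_dec('1 2 3 +4 5 6')          # -> ('1 2 3', '+4 5 6')
#     _parse_ra_dec('10 20 +30 40')          # -> ('10 20 ', '+ 30 40')
#     _parse_ra_dec('J123456.78+123456.78')  # -> ('12 34 56.78', '+12 34 56.78')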
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""
# Standard library
import abc
import copy
import inspect
from collections import namedtuple, OrderedDict, defaultdict
import warnings
# Dependencies
import numpy as np
# Project
from astropy.utils.compat.misc import override__dir__
from astropy.utils.decorators import lazyproperty, format_doc
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
from astropy import units as u
from astropy.utils import (OrderedDescriptorContainer, ShapedLikeNDArray,
check_broadcast)
from .transformations import TransformGraph
from . import representation as r
from .angles import Angle
from .attributes import Attribute
# Import old names for Attributes so we don't break backwards-compatibility
# (some users rely on them being here, although that is not encouraged, as this
# is not the public API location -- see attributes.py).
from .attributes import (
TimeFrameAttribute, QuantityFrameAttribute,
EarthLocationAttribute, CoordinateAttribute,
CartesianRepresentationFrameAttribute) # pylint: disable=W0611
__all__ = ['BaseCoordinateFrame', 'frame_transform_graph',
'GenericFrame', 'RepresentationMapping']
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
def _get_repr_cls(value):
"""
Return a valid representation class from ``value`` or raise exception.
"""
if value in r.REPRESENTATION_CLASSES:
value = r.REPRESENTATION_CLASSES[value]
elif (not isinstance(value, type) or
not issubclass(value, r.BaseRepresentation)):
raise ValueError(
'Representation is {0!r} but must be a BaseRepresentation class '
'or one of the string aliases {1}'.format(
value, list(r.REPRESENTATION_CLASSES)))
return value
def _get_diff_cls(value):
"""
Return a valid differential class from ``value`` or raise exception.
As originally created, this is only used in the SkyCoord initializer, so if
    that is refactored, this function may no longer be necessary.
"""
if value in r.DIFFERENTIAL_CLASSES:
value = r.DIFFERENTIAL_CLASSES[value]
elif (not isinstance(value, type) or
not issubclass(value, r.BaseDifferential)):
raise ValueError(
'Differential is {0!r} but must be a BaseDifferential class '
'or one of the string aliases {1}'.format(
value, list(r.DIFFERENTIAL_CLASSES)))
return value
def _get_repr_classes(base, **differentials):
"""Get valid representation and differential classes.
Parameters
----------
base : str or `~astropy.coordinates.BaseRepresentation` subclass
class for the representation of the base coordinates. If a string,
it is looked up among the known representation classes.
    **differentials : dict of str or `~astropy.coordinates.BaseDifferential`
Keys are like for normal differentials, i.e., 's' for a first
derivative in time, etc. If an item is set to `None`, it will be
guessed from the base class.
Returns
-------
repr_classes : dict of subclasses
The base class is keyed by 'base'; the others by the keys of
        ``differentials``.
"""
base = _get_repr_cls(base)
repr_classes = {'base': base}
for name, differential_type in differentials.items():
if differential_type == 'base':
# We don't want to fail for this case.
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None)
elif differential_type in r.DIFFERENTIAL_CLASSES:
differential_type = r.DIFFERENTIAL_CLASSES[differential_type]
elif (differential_type is not None and
(not isinstance(differential_type, type) or
not issubclass(differential_type, r.BaseDifferential))):
raise ValueError(
'Differential is {0!r} but must be a BaseDifferential class '
'or one of the string aliases {1}'.format(
differential_type, list(r.DIFFERENTIAL_CLASSES)))
repr_classes[name] = differential_type
return repr_classes
def _representation_deprecation():
"""
Raises a deprecation warning for the "representation" keyword
"""
warnings.warn('The `representation` keyword/property name is deprecated in '
'favor of `representation_type`', AstropyDeprecationWarning)
def _normalize_representation_type(kwargs):
""" This is added for backwards compatibility: if the user specifies the
old-style argument ``representation``, add it back in to the kwargs dict
as ``representation_type``.
"""
if 'representation' in kwargs:
if 'representation_type' in kwargs:
raise ValueError("Both `representation` and `representation_type` "
"were passed to a frame initializer. Please use "
"only `representation_type` (`representation` is "
"now pending deprecation).")
_representation_deprecation()
kwargs['representation_type'] = kwargs.pop('representation')
# Need to subclass ABCMeta as well, so that this meta class can be combined
# with ShapedLikeNDArray below (which is an ABC); without it, one gets
# "TypeError: metaclass conflict: the metaclass of a derived class must be a
# (non-strict) subclass of the metaclasses of all its bases"
class FrameMeta(OrderedDescriptorContainer, abc.ABCMeta):
def __new__(mcls, name, bases, members):
if 'default_representation' in members:
default_repr = members.pop('default_representation')
found_default_repr = True
else:
default_repr = None
found_default_repr = False
if 'default_differential' in members:
default_diff = members.pop('default_differential')
found_default_diff = True
else:
default_diff = None
found_default_diff = False
if 'frame_specific_representation_info' in members:
repr_info = members.pop('frame_specific_representation_info')
found_repr_info = True
else:
repr_info = None
found_repr_info = False
# somewhat hacky, but this is the best way to get the MRO according to
# https://mail.python.org/pipermail/python-list/2002-December/167861.html
tmp_cls = super().__new__(mcls, name, bases, members)
# now look through the whole MRO for the class attributes, raw for
# frame_attr_names, and leading underscore for others
for m in (c.__dict__ for c in tmp_cls.__mro__):
if not found_default_repr and '_default_representation' in m:
default_repr = m['_default_representation']
found_default_repr = True
if not found_default_diff and '_default_differential' in m:
default_diff = m['_default_differential']
found_default_diff = True
if (not found_repr_info and
'_frame_specific_representation_info' in m):
# create a copy of the dict so we don't mess with the contents
repr_info = m['_frame_specific_representation_info'].copy()
found_repr_info = True
if found_default_repr and found_default_diff and found_repr_info:
break
else:
raise ValueError(
'Could not find all expected BaseCoordinateFrame class '
'attributes. Are you mis-using FrameMeta?')
# Unless overridden via `frame_specific_representation_info`, velocity
# name defaults are (see also docstring for BaseCoordinateFrame):
# * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for
# `SphericalCosLatDifferential` proper motion components
# * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper
# motion components
# * ``radial_velocity`` for any `d_distance` component
# * ``v_{x,y,z}`` for `CartesianDifferential` velocity components
# where `{lon}` and `{lat}` are the frame names of the angular
# components.
if repr_info is None:
repr_info = {}
# the tuple() call below is necessary because if it is not there,
# the iteration proceeds in a difficult-to-predict manner in the
        # case that one of the class objects' hashes is such that it gets
# revisited by the iteration. The tuple() call prevents this by
# making the items iterated over fixed regardless of how the dict
# changes
for cls_or_name in tuple(repr_info.keys()):
if isinstance(cls_or_name, str):
# TODO: this provides a layer of backwards compatibility in
# case the key is a string, but now we want explicit classes.
cls = _get_repr_cls(cls_or_name)
repr_info[cls] = repr_info.pop(cls_or_name)
# The default spherical names are 'lon' and 'lat'
repr_info.setdefault(r.SphericalRepresentation,
[RepresentationMapping('lon', 'lon'),
RepresentationMapping('lat', 'lat')])
sph_component_map = {m.reprname: m.framename
for m in repr_info[r.SphericalRepresentation]}
repr_info.setdefault(r.SphericalCosLatDifferential, [
RepresentationMapping(
'd_lon_coslat',
'pm_{lon}_cos{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_lat',
'pm_{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_distance', 'radial_velocity',
u.km/u.s)
])
repr_info.setdefault(r.SphericalDifferential, [
RepresentationMapping('d_lon',
'pm_{lon}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_lat',
'pm_{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_distance', 'radial_velocity',
u.km/u.s)
])
repr_info.setdefault(r.CartesianDifferential, [
RepresentationMapping('d_x', 'v_x', u.km/u.s),
RepresentationMapping('d_y', 'v_y', u.km/u.s),
RepresentationMapping('d_z', 'v_z', u.km/u.s)])
# Unit* classes should follow the same naming conventions
# TODO: this adds some unnecessary mappings for the Unit classes, so
# this could be cleaned up, but in practice doesn't seem to have any
# negative side effects
repr_info.setdefault(r.UnitSphericalRepresentation,
repr_info[r.SphericalRepresentation])
repr_info.setdefault(r.UnitSphericalCosLatDifferential,
repr_info[r.SphericalCosLatDifferential])
repr_info.setdefault(r.UnitSphericalDifferential,
repr_info[r.SphericalDifferential])
# Make read-only properties for the frame class attributes that should
# be read-only to make them immutable after creation.
# We copy attributes instead of linking to make sure there's no
# accidental cross-talk between classes
mcls.readonly_prop_factory(members, 'default_representation',
default_repr)
mcls.readonly_prop_factory(members, 'default_differential',
default_diff)
mcls.readonly_prop_factory(members,
'frame_specific_representation_info',
copy.deepcopy(repr_info))
# now set the frame name as lower-case class name, if it isn't explicit
if 'name' not in members:
members['name'] = name.lower()
        # A cache that *must be unique to each frame class* - it is
        # insufficient to share it with superclasses, hence the need to
        # set it here in the metaclass.
members['_frame_class_cache'] = {}
return super().__new__(mcls, name, bases, members)
@staticmethod
def readonly_prop_factory(members, attr, value):
private_attr = '_' + attr
def getter(self):
return getattr(self, private_attr)
members[private_attr] = value
members[attr] = property(getter)
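# Illustrative sketch (an assumption for clarity, not part of the machinery
# above): ``readonly_prop_factory`` mutates the ``members`` dict so that,
# after class creation, the value is reachable only through a read-only
# property, roughly equivalent to having written:
#
#     members['_default_representation'] = some_repr_class
#     members['default_representation'] = property(
#         lambda self: self._default_representation)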
_RepresentationMappingBase = \
namedtuple('RepresentationMapping',
('reprname', 'framename', 'defaultunit'))
class RepresentationMapping(_RepresentationMappingBase):
"""
This `~collections.namedtuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (to use whatever
the representation's ``recommended_units`` is), or None (to indicate that
no unit mapping should be done).
"""
def __new__(cls, reprname, framename, defaultunit='recommended'):
# this trick just provides some defaults
return super().__new__(cls, reprname, framename, defaultunit)
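# A hypothetical usage sketch (``MyFrame`` and the ``ra``/``dec`` names are
# illustrative assumptions, not defined in this module): a frame that exposes
# its spherical components as ``ra``/``dec`` with degree defaults could write:
#
#     class MyFrame(BaseCoordinateFrame):
#         default_representation = r.SphericalRepresentation
#         default_differential = r.SphericalCosLatDifferential
#         frame_specific_representation_info = {
#             r.SphericalRepresentation: [
#                 RepresentationMapping('lon', 'ra', u.deg),
#                 RepresentationMapping('lat', 'dec', u.deg)]
#         }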
base_doc = """{__doc__}
Parameters
----------
data : `BaseRepresentation` subclass instance
A representation object or ``None`` to have no data (or use the
coordinate component arguments, see below).
{components}
representation_type : `BaseRepresentation` subclass, str, optional
A representation class or string name of a representation class. This
sets the expected input representation class, thereby changing the
expected keyword arguments for the data passed in. For example, passing
``representation_type='cartesian'`` will make the classes expect
position data with cartesian names, i.e. ``x, y, z`` in most cases.
differential_type : `BaseDifferential` subclass, str, dict, optional
A differential class or dictionary of differential classes (currently
only a velocity differential with key 's' is supported). This sets the
expected input differential class, thereby changing the expected keyword
arguments of the data passed in. For example, passing
``differential_type='cartesian'`` will make the classes expect velocity
data with the argument names ``v_x, v_y, v_z``.
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
{footer}
"""
_components = """
*args, **kwargs
Coordinate components, with names that depend on the subclass.
"""
@format_doc(base_doc, components=_components, footer="")
class BaseCoordinateFrame(ShapedLikeNDArray, metaclass=FrameMeta):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `default_differential`
A subclass of `~astropy.coordinates.BaseDifferential` that will be
treated as the default differential class of this frame. This is the
differential class assumed by default when the frame is created.
* `~astropy.coordinates.Attribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
Unless overridden via `frame_specific_representation_info`, velocity name
defaults are:
* ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `SphericalCosLatDifferential`
proper motion components
* ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper motion
components
* ``radial_velocity`` for any ``d_distance`` component
* ``v_{x,y,z}`` for `CartesianDifferential` velocity components
where ``{lon}`` and ``{lat}`` are the frame names of the angular components.
"""
default_representation = None
default_differential = None
# Specifies special names and units for representation and differential
# attributes.
frame_specific_representation_info = {}
_inherit_descriptors_ = (Attribute,)
frame_attributes = OrderedDict()
# Default empty frame_attributes dict
def __init__(self, *args, copy=True, representation_type=None,
differential_type=None, **kwargs):
self._attr_names_with_defaults = []
        # This is here for backwards compatibility. It should be possible
        # to use either the kwarg ``representation_type`` or ``representation``.
if representation_type is not None:
kwargs['representation_type'] = representation_type
_normalize_representation_type(kwargs)
representation_type = kwargs.pop('representation_type', representation_type)
if representation_type is not None or differential_type is not None:
if representation_type is None:
representation_type = self.default_representation
if (inspect.isclass(differential_type) and
issubclass(differential_type, r.BaseDifferential)):
# TODO: assumes the differential class is for the velocity
# differential
differential_type = {'s': differential_type}
elif isinstance(differential_type, str):
# TODO: assumes the differential class is for the velocity
# differential
diff_cls = r.DIFFERENTIAL_CLASSES[differential_type]
differential_type = {'s': diff_cls}
elif differential_type is None:
if representation_type == self.default_representation:
differential_type = {'s': self.default_differential}
else:
differential_type = {'s': 'base'} # see set_representation_cls()
self.set_representation_cls(representation_type,
**differential_type)
# if not set below, this is a frame with no data
representation_data = None
differential_data = None
args = list(args) # need to be able to pop them
if (len(args) > 0) and (isinstance(args[0], r.BaseRepresentation) or
args[0] is None):
representation_data = args.pop(0)
if len(args) > 0:
raise TypeError(
'Cannot create a frame with both a representation object '
'and other positional arguments')
if representation_data is not None:
diffs = representation_data.differentials
differential_data = diffs.get('s', None)
if ((differential_data is None and len(diffs) > 0) or
(differential_data is not None and len(diffs) > 1)):
raise ValueError('Multiple differentials are associated '
'with the representation object passed in '
'to the frame initializer. Only a single '
'velocity differential is supported. Got: '
'{0}'.format(diffs))
elif self.representation_type:
representation_cls = self.get_representation_cls()
# Get any representation data passed in to the frame initializer
# using keyword or positional arguments for the component names
repr_kwargs = {}
for nmkw, nmrep in self.representation_component_names.items():
if len(args) > 0:
# first gather up positional args
repr_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
repr_kwargs[nmrep] = kwargs.pop(nmkw)
# special-case the Spherical->UnitSpherical if no `distance`
if repr_kwargs:
# TODO: determine how to get rid of the part before the "try" -
# currently removing it has a performance regression for
# unitspherical because of the try-related overhead.
# Also frames have no way to indicate what the "distance" is
if repr_kwargs.get('distance', True) is None:
del repr_kwargs['distance']
if (issubclass(representation_cls, r.SphericalRepresentation)
and 'distance' not in repr_kwargs):
representation_cls = representation_cls._unit_representation
try:
representation_data = representation_cls(copy=copy,
**repr_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
try:
representation_data = representation_cls._unit_representation(copy=copy,
**repr_kwargs)
except Exception as e2:
msg = str(e)
names = self.get_representation_component_names()
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace('__init__()',
'{0}()'.format(self.__class__.__name__))
e.args = (msg,)
raise e
# Now we handle the Differential data:
# Get any differential data passed in to the frame initializer
# using keyword or positional arguments for the component names
differential_cls = self.get_representation_cls('s')
diff_component_names = self.get_representation_component_names('s')
diff_kwargs = {}
for nmkw, nmrep in diff_component_names.items():
if len(args) > 0:
# first gather up positional args
diff_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
diff_kwargs[nmrep] = kwargs.pop(nmkw)
if diff_kwargs:
if (hasattr(differential_cls, '_unit_differential') and
'd_distance' not in diff_kwargs):
differential_cls = differential_cls._unit_differential
elif len(diff_kwargs) == 1 and 'd_distance' in diff_kwargs:
differential_cls = r.RadialDifferential
try:
differential_data = differential_cls(copy=copy,
**diff_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
msg = str(e)
names = self.get_representation_component_names('s')
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace('__init__()',
'{0}()'.format(self.__class__.__name__))
e.args = (msg,)
raise
if len(args) > 0:
raise TypeError(
'{0}.__init__ had {1} remaining unhandled arguments'.format(
self.__class__.__name__, len(args)))
if representation_data is None and differential_data is not None:
raise ValueError("Cannot pass in differential component data "
"without positional (representation) data.")
if differential_data:
self._data = representation_data.with_differentials(
{'s': differential_data})
else:
self._data = representation_data # possibly None.
values = {}
for fnm, fdefault in self.get_frame_attr_names().items():
            # Read-only frame attributes are defined as Attribute
            # descriptors, which are not settable, so set 'real' attributes
            # as the name prefaced with an underscore.
if fnm in kwargs:
value = kwargs.pop(fnm)
setattr(self, '_' + fnm, value)
# Validate attribute by getting it. If the instance has data,
# this also checks its shape is OK. If not, we do it below.
values[fnm] = getattr(self, fnm)
else:
setattr(self, '_' + fnm, fdefault)
self._attr_names_with_defaults.append(fnm)
if kwargs:
raise TypeError(
'Coordinate frame got unexpected keywords: {0}'.format(
list(kwargs)))
# We do ``is None`` because self._data might evaluate to false for
# empty arrays or data == 0
if self._data is None:
# No data: we still need to check that any non-scalar attributes
# have consistent shapes. Collect them for all attributes with
# size > 1 (which should be array-like and thus have a shape).
shapes = {fnm: value.shape for fnm, value in values.items()
if getattr(value, 'size', 1) > 1}
if shapes:
if len(shapes) > 1:
try:
self._no_data_shape = check_broadcast(*shapes.values())
except ValueError:
raise ValueError(
"non-scalar attributes with inconsistent "
"shapes: {0}".format(shapes))
# Above, we checked that it is possible to broadcast all
# shapes. By getting and thus validating the attributes,
# we verify that the attributes can in fact be broadcast.
for fnm in shapes:
getattr(self, fnm)
else:
self._no_data_shape = shapes.popitem()[1]
else:
self._no_data_shape = ()
else:
# This makes the cache keys backwards-compatible, but also adds
# support for having differentials attached to the frame data
# representation object.
if 's' in self._data.differentials:
# TODO: assumes a velocity unit differential
key = (self._data.__class__.__name__,
self._data.differentials['s'].__class__.__name__,
False)
else:
key = (self._data.__class__.__name__, False)
# Set up representation cache.
self.cache['representation'][key] = self._data
@lazyproperty
def cache(self):
"""
Cache for this frame, a dict. It stores anything that should be
computed from the coordinate data (*not* from the frame attributes).
This can be used in functions to store anything that might be
expensive to compute but might be re-used by some other function.
E.g.::
if 'user_data' in myframe.cache:
data = myframe.cache['user_data']
else:
myframe.cache['user_data'] = data = expensive_func(myframe.lat)
If in-place modifications are made to the frame data, the cache should
be cleared::
myframe.cache.clear()
"""
return defaultdict(dict)
@property
def data(self):
"""
        The coordinate data for this object. If this frame has no data, a
        `ValueError` will be raised. Use `has_data` to check if data is
        present on this frame object.
"""
if self._data is None:
raise ValueError('The frame object "{0!r}" does not have '
'associated data'.format(self))
return self._data
@property
def has_data(self):
"""
True if this frame has `data`, False otherwise.
"""
return self._data is not None
@property
def shape(self):
return self.data.shape if self.has_data else self._no_data_shape
# We have to override the ShapedLikeNDArray definitions, since our shape
# does not have to be that of the data.
def __len__(self):
return len(self.data)
def __bool__(self):
return self.has_data and self.size > 0
@property
def size(self):
return self.data.size
@property
def isscalar(self):
return self.has_data and self.data.isscalar
@classmethod
def get_frame_attr_names(cls):
return OrderedDict((name, getattr(cls, name))
for name in cls.frame_attributes)
def get_representation_cls(self, which='base'):
"""The class used for part of this frame's data.
Parameters
----------
which : ('base', 's', `None`)
The class of which part to return. 'base' means the class used to
represent the coordinates; 's' the first derivative to time, i.e.,
the class representing the proper motion and/or radial velocity.
If `None`, return a dict with both.
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
"""
if not hasattr(self, '_representation'):
self._representation = {'base': self.default_representation,
's': self.default_differential}
if which is not None:
return self._representation[which]
else:
return self._representation
def set_representation_cls(self, base=None, s='base'):
"""Set representation and/or differential class for this frame's data.
Parameters
----------
base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
The name or subclass to use to represent the coordinate data.
s : `~astropy.coordinates.BaseDifferential` subclass, optional
The differential subclass to use to represent any velocities,
such as proper motion and radial velocity. If equal to 'base',
which is the default, it will be inferred from the representation.
If `None`, the representation will drop any differentials.
"""
if base is None:
base = self._representation['base']
self._representation = _get_repr_classes(base=base, s=s)
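    # Example sketch (hedged; ``frame`` stands for any frame instance with
    # data): the two spellings below are equivalent ways to switch the data
    # to Cartesian form for subsequent attribute access:
    #
    #     frame.set_representation_cls('cartesian')
    #     frame.representation_type = 'cartesian'   # via the property below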
representation_type = property(
fget=get_representation_cls, fset=set_representation_cls,
doc="""The representation class used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
Can also be *set* using the string name of the representation. If you
wish to set an explicit differential class (rather than have it be
    inferred), use the ``set_representation_cls`` method.
""")
@property
def differential_type(self):
"""
The differential used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseDifferential`.
For simultaneous setting of representation and differentials, see the
        ``set_representation_cls`` method.
"""
return self.get_representation_cls('s')
@differential_type.setter
def differential_type(self, value):
self.set_representation_cls(s=value)
# TODO: remove these in a future version
@property
def representation(self):
_representation_deprecation()
return self.representation_type
@representation.setter
def representation(self, value):
_representation_deprecation()
self.representation_type = value
@classmethod
def _get_representation_info(cls):
# This exists as a class method only to support handling frame inputs
# without units, which are deprecated and will be removed. This can be
# moved into the representation_info property at that time.
        # note that if so moved, the cache should be accessed as
# self.__class__._frame_class_cache
if cls._frame_class_cache.get('last_reprdiff_hash', None) != r.get_reprdiff_cls_hash():
repr_attrs = {}
for repr_diff_cls in (list(r.REPRESENTATION_CLASSES.values()) +
list(r.DIFFERENTIAL_CLASSES.values())):
repr_attrs[repr_diff_cls] = {'names': [], 'units': []}
for c, c_cls in repr_diff_cls.attr_classes.items():
repr_attrs[repr_diff_cls]['names'].append(c)
# TODO: when "recommended_units" is removed, just directly use
# the default part here.
rec_unit = repr_diff_cls._recommended_units.get(
c, u.deg if issubclass(c_cls, Angle) else None)
repr_attrs[repr_diff_cls]['units'].append(rec_unit)
for repr_diff_cls, mappings in cls._frame_specific_representation_info.items():
# take the 'names' and 'units' tuples from repr_attrs,
# and then use the RepresentationMapping objects
# to update as needed for this frame.
nms = repr_attrs[repr_diff_cls]['names']
uns = repr_attrs[repr_diff_cls]['units']
comptomap = dict([(m.reprname, m) for m in mappings])
for i, c in enumerate(repr_diff_cls.attr_classes.keys()):
if c in comptomap:
mapp = comptomap[c]
nms[i] = mapp.framename
# need the isinstance because otherwise if it's a unit it
# will try to compare to the unit string representation
if not (isinstance(mapp.defaultunit, str) and
mapp.defaultunit == 'recommended'):
uns[i] = mapp.defaultunit
# else we just leave it as recommended_units says above
# Convert to tuples so that this can't mess with frame internals
repr_attrs[repr_diff_cls]['names'] = tuple(nms)
repr_attrs[repr_diff_cls]['units'] = tuple(uns)
cls._frame_class_cache['representation_info'] = repr_attrs
cls._frame_class_cache['last_reprdiff_hash'] = r.get_reprdiff_cls_hash()
return cls._frame_class_cache['representation_info']
@lazyproperty
def representation_info(self):
"""
A dictionary with the information of what attribute names for this frame
apply to particular representations.
"""
return self._get_representation_info()
def get_representation_component_names(self, which='base'):
out = OrderedDict()
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
data_names = repr_or_diff_cls.attr_classes.keys()
repr_names = self.representation_info[repr_or_diff_cls]['names']
for repr_name, data_name in zip(repr_names, data_names):
out[repr_name] = data_name
return out
def get_representation_component_units(self, which='base'):
out = OrderedDict()
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
repr_attrs = self.representation_info[repr_or_diff_cls]
repr_names = repr_attrs['names']
repr_units = repr_attrs['units']
for repr_name, repr_unit in zip(repr_names, repr_units):
if repr_unit:
out[repr_name] = repr_unit
return out
representation_component_names = property(get_representation_component_names)
representation_component_units = property(get_representation_component_units)
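    # Example sketch of what these mappings look like (values assume the
    # built-in ICRS frame with its default spherical representation):
    #
    #     ICRS().representation_component_names
    #     # -> OrderedDict([('ra', 'lon'), ('dec', 'lat'),
    #     #                 ('distance', 'distance')])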
def _replicate(self, data, copy=False, **kwargs):
"""Base for replicating a frame, with possibly different attributes.
Produces a new instance of the frame using the attributes of the old
frame (unless overridden) and with the data given.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` or `None`
Data to use in the new frame instance. If `None`, it will be
a data-less frame.
copy : bool, optional
Whether data and the attributes on the old frame should be copied
(default), or passed on by reference.
**kwargs
Any attributes that should be overridden.
"""
# This is to provide a slightly nicer error message if the user tries
# to use frame_obj.representation instead of frame_obj.data to get the
# underlying representation object [e.g., #2890]
if inspect.isclass(data):
raise TypeError('Class passed as data instead of a representation '
'instance. If you called frame.representation, this'
' returns the representation class. frame.data '
'returns the instantiated object - you may want to '
                            'use this instead.')
if copy and data is not None:
data = data.copy()
for attr in self.get_frame_attr_names():
if (attr not in self._attr_names_with_defaults and
attr not in kwargs):
value = getattr(self, attr)
if copy:
value = value.copy()
kwargs[attr] = value
return self.__class__(data, copy=False, **kwargs)
def replicate(self, copy=False, **kwargs):
"""
Return a replica of the frame, optionally with new frame attributes.
The replica is a new frame object that has the same data as this frame
object and with frame attributes overridden if they are provided as extra
keyword arguments to this method. If ``copy`` is set to `True` then a
copy of the internal arrays will be made. Otherwise the replica will
use a reference to the original arrays when possible to save memory. The
internal arrays are normally not changeable by the user so in most cases
it should not be necessary to set ``copy`` to `True`.
Parameters
----------
copy : bool, optional
If True, the resulting object is a copy of the data. When False,
references are used where possible. This rule also applies to the
frame attributes.
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : same as this frame
Replica of this object, but possibly with new frame attributes.
"""
return self._replicate(self.data, copy=copy, **kwargs)
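    # Usage sketch (hedged; uses the built-in FK5 frame, whose ``equinox``
    # frame attribute can be overridden on the replica):
    #
    #     c = FK5(10*u.deg, 20*u.deg)
    #     c2 = c.replicate(equinox='J1975')   # same data, new frame attribute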
def replicate_without_data(self, copy=False, **kwargs):
"""
Return a replica without data, optionally with new frame attributes.
The replica is a new frame object without data but with the same frame
attributes as this object, except where overridden by extra keyword
arguments to this method. The ``copy`` keyword determines if the frame
attributes are truly copied vs being references (which saves memory for
cases where frame attributes are large).
This method is essentially the converse of `realize_frame`.
Parameters
----------
copy : bool, optional
If True, the resulting object has copies of the frame attributes.
When False, references are used where possible.
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : same as this frame
Replica of this object, but without data and possibly with new frame
attributes.
"""
return self._replicate(None, copy=copy, **kwargs)
def realize_frame(self, data):
"""
Generates a new frame with new data from another frame (which may or
may not have data). Roughly speaking, the converse of
`replicate_without_data`.
Parameters
----------
data : `BaseRepresentation`
The representation to use as the data for the new frame.
Returns
-------
frameobj : same as this frame
A new object with the same frame attributes as this one, but
with the ``data`` as the coordinate data.
"""
return self._replicate(data)
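    # Round-trip sketch (hedged): ``replicate_without_data`` and
    # ``realize_frame`` are converses, so for a frame ``c`` with data,
    #
    #     empty = c.replicate_without_data()   # frame attributes, no data
    #     full = empty.realize_frame(c.data)   # data re-attached
    #
    # gives ``full`` equivalent to ``c``.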
def represent_as(self, base, s='base', in_frame_units=False):
"""
Generate and return a new representation of this frame's `data`
as a Representation object.
Note: In order to make an in-place change of the representation
of a Frame or SkyCoord object, set the ``representation``
attribute of that object to the desired new representation, or
use the ``set_representation_cls`` method to also set the differential.
Parameters
----------
base : subclass of BaseRepresentation or string
The type of representation to generate. Must be a *class*
(not an instance), or the string name of the representation
class.
s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
Class in which any velocities should be represented. Must be
a *class* (not an instance), or the string name of the
differential class. If equal to 'base' (default), inferred from
the base class. If `None`, all velocity information is dropped.
in_frame_units : bool, keyword only
Force the representation units to match the specified units
particular to this frame
Returns
-------
newrep : BaseRepresentation-derived object
A new representation object of this frame's `data`.
Raises
------
AttributeError
If this object had no `data`
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> coord = SkyCoord(0*u.deg, 0*u.deg)
>>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP
<CartesianRepresentation (x, y, z) [dimensionless]
(1., 0., 0.)>
>>> coord.representation_type = CartesianRepresentation
>>> coord # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (x, y, z) [dimensionless]
(1., 0., 0.)>
"""
        # For backwards compatibility (because in_frame_units used to be the
        # 2nd argument), we check to see if `s` is a boolean. If it is, we
        # treat it as `in_frame_units` and warn about the position change.
if isinstance(s, bool):
warnings.warn("The argument position for `in_frame_units` in "
"`represent_as` has changed. Use as a keyword "
"argument if needed.", AstropyWarning)
in_frame_units = s
s = 'base'
# In the future, we may want to support more differentials, in which
# case one probably needs to define **kwargs above and use it here.
# But for now, we only care about the velocity.
repr_classes = _get_repr_classes(base=base, s=s)
representation_cls = repr_classes['base']
# We only keep velocity information
if 's' in self.data.differentials:
differential_cls = repr_classes['s']
elif s is None or s == 'base':
differential_cls = None
else:
raise TypeError('Frame data has no associated differentials '
'(i.e. the frame has no velocity data) - '
'represent_as() only accepts a new '
'representation.')
if differential_cls:
cache_key = (representation_cls.__name__,
differential_cls.__name__, in_frame_units)
else:
cache_key = (representation_cls.__name__, in_frame_units)
cached_repr = self.cache['representation'].get(cache_key)
if not cached_repr:
if differential_cls:
# TODO NOTE: only supports a single differential
data = self.data.represent_as(representation_cls,
differential_cls)
diff = data.differentials['s'] # TODO: assumes velocity
else:
data = self.data.represent_as(representation_cls)
# If the new representation is known to this frame and has a defined
# set of names and units, then use that.
new_attrs = self.representation_info.get(representation_cls)
if new_attrs and in_frame_units:
datakwargs = dict((comp, getattr(data, comp))
for comp in data.components)
for comp, new_attr_unit in zip(data.components, new_attrs['units']):
if new_attr_unit:
datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
data = data.__class__(copy=False, **datakwargs)
if differential_cls:
# the original differential
data_diff = self.data.differentials['s']
# If the new differential is known to this frame and has a
# defined set of names and units, then use that.
new_attrs = self.representation_info.get(differential_cls)
if new_attrs and in_frame_units:
diffkwargs = dict((comp, getattr(diff, comp))
for comp in diff.components)
for comp, new_attr_unit in zip(diff.components,
new_attrs['units']):
# Some special-casing to treat a situation where the
# input data has a UnitSphericalDifferential or a
# RadialDifferential. It is re-represented to the
# frame's differential class (which might be, e.g., a
# dimensional Differential), so we don't want to try to
# convert the empty component units
if (isinstance(data_diff,
(r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential)) and
comp not in data_diff.__class__.attr_classes):
continue
elif (isinstance(data_diff, r.RadialDifferential) and
comp not in data_diff.__class__.attr_classes):
continue
if new_attr_unit and hasattr(diff, comp):
diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit)
diff = diff.__class__(copy=False, **diffkwargs)
# Here we have to bypass using with_differentials() because
# it has a validation check. But because
# .representation_type and .differential_type don't point to
# the original classes, if the input differential is a
# RadialDifferential, it usually gets turned into a
# SphericalCosLatDifferential (or whatever the default is)
# with strange units for the d_lon and d_lat attributes.
# This then causes the dictionary key check to fail (i.e.
# comparison against `diff._get_deriv_key()`)
data._differentials.update({'s': diff})
self.cache['representation'][cache_key] = data
return self.cache['representation'][cache_key]
def transform_to(self, new_frame):
"""
Transform this object's coordinate data to a new frame.
Parameters
----------
new_frame : class or frame object or SkyCoord object
The frame to transform this coordinate frame into.
Returns
-------
transframe
A new object with the coordinate data represented in the
``newframe`` system.
Raises
------
ValueError
If there is no possible transformation route.
"""
from .errors import ConvertError
if self._data is None:
raise ValueError('Cannot transform a frame with no data')
if (getattr(self.data, 'differentials', None) and
hasattr(self, 'obstime') and hasattr(new_frame, 'obstime') and
np.any(self.obstime != new_frame.obstime)):
raise NotImplementedError('You cannot transform a frame that has '
'velocities to another frame at a '
'different obstime. If you think this '
'should (or should not) be possible, '
'please comment at https://github.com/astropy/astropy/issues/6280')
if inspect.isclass(new_frame):
# Use the default frame attributes for this class
new_frame = new_frame()
if hasattr(new_frame, '_sky_coord_frame'):
# Input new_frame is not a frame instance or class and is most
# likely a SkyCoord object.
new_frame = new_frame._sky_coord_frame
trans = frame_transform_graph.get_transform(self.__class__,
new_frame.__class__)
if trans is None:
            # `new_frame` was instantiated above if a class was passed in,
            # so compare classes rather than object identity here
            if new_frame.__class__ is self.__class__:
# no special transform needed, but should update frame info
return new_frame.realize_frame(self.data)
msg = 'Cannot transform from {0} to {1}'
raise ConvertError(msg.format(self.__class__, new_frame.__class__))
return trans(self, new_frame)
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : class or frame object
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__
trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls)
if trans is None:
if new_frame_cls is self.__class__:
return 'same'
else:
return False
else:
return True
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
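    # Example sketch (hedged; FK5's ``equinox`` defaults to J2000):
    #
    #     FK5().is_frame_attr_default('equinox')                 # -> True
    #     FK5(equinox='J1975').is_frame_attr_default('equinox')  # -> False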
@staticmethod
def _frameattr_equiv(left_fattr, right_fattr):
"""
        Determine if two frame attributes are equivalent. Implemented as a
        staticmethod mainly as a convenient location, although conceivably it
        might be desirable for subclasses to override this behavior.
Primary purpose is to check for equality of representations, since by
default representation equality is only "is it the same object", which
is too strict for frame comparisons.
Note: this method may be removed when/if representations have an
appropriate equality defined.
"""
if isinstance(left_fattr, r.BaseRepresentationOrDifferential):
if left_fattr is right_fattr:
# shortcut if it's exactly the same object
return True
elif isinstance(right_fattr, r.BaseRepresentationOrDifferential):
# both are representations.
                if ((hasattr(left_fattr, 'differentials') and left_fattr.differentials) or
                        (hasattr(right_fattr, 'differentials') and right_fattr.differentials)):
warnings.warn('Two representation frame attributes were '
'checked for equivalence when at least one of'
' them has differentials. This yields False '
'even if the underlying representations are '
'equivalent (although this may change in '
'future versions of Astropy)', AstropyWarning)
return False
if isinstance(right_fattr, left_fattr.__class__):
# if same representation type, compare components.
return np.all([(getattr(left_fattr, comp) ==
getattr(right_fattr, comp))
for comp in left_fattr.components])
else:
# convert to cartesian and see if they match
return np.all(left_fattr.to_cartesian().xyz ==
right_fattr.to_cartesian().xyz)
else:
return False
else:
return np.all(left_fattr == right_fattr)
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : BaseCoordinateFrame
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `BaseCoordinateFrame` or subclass.
"""
if self.__class__ == other.__class__:
for frame_attr_name in self.get_frame_attr_names():
if not self._frameattr_equiv(getattr(self, frame_attr_name),
getattr(other, frame_attr_name)):
return False
return True
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't a frame")
else:
return False
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = ' ({0})'.format(frameattrs)
if data_repr:
return '<{0} Coordinate{1}: {2}>'.format(self.__class__.__name__,
frameattrs, data_repr)
else:
return '<{0} Frame{1}>'.format(self.__class__.__name__,
frameattrs)
def _data_repr(self):
"""Returns a string representation of the coordinate data."""
if not self.has_data:
return ''
if self.representation_type:
if (hasattr(self.representation_type, '_unit_representation') and
isinstance(self.data, self.representation_type._unit_representation)):
rep_cls = self.data.__class__
else:
rep_cls = self.representation_type
if 's' in self.data.differentials:
dif_cls = self.get_representation_cls('s')
dif_data = self.data.differentials['s']
if isinstance(dif_data, (r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential)):
dif_cls = dif_data.__class__
else:
dif_cls = None
data = self.represent_as(rep_cls, dif_cls, in_frame_units=True)
data_repr = repr(data)
for nmpref, nmrepr in self.representation_component_names.items():
data_repr = data_repr.replace(nmrepr, nmpref)
else:
data = self.data
data_repr = repr(self.data)
if data_repr.startswith('<' + data.__class__.__name__):
# remove both the leading "<" and the space after the name, as well
# as the trailing ">"
data_repr = data_repr[(len(data.__class__.__name__) + 2):-1]
else:
data_repr = 'Data:\n' + data_repr
if 's' in self.data.differentials:
data_repr_spl = data_repr.split('\n')
if 'has differentials' in data_repr_spl[-1]:
diffrepr = repr(data.differentials['s']).split('\n')
if diffrepr[0].startswith('<'):
diffrepr[0] = ' ' + ' '.join(diffrepr[0].split(' ')[1:])
for frm_nm, rep_nm in self.get_representation_component_names('s').items():
diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm)
if diffrepr[-1].endswith('>'):
diffrepr[-1] = diffrepr[-1][:-1]
data_repr_spl[-1] = '\n'.join(diffrepr)
data_repr = '\n'.join(data_repr_spl)
return data_repr
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
attr_strs = []
for attribute_name in self.get_frame_attr_names():
attr = getattr(self, attribute_name)
# Check to see if this object has a way of representing itself
# specific to being an attribute of a frame. (Note, this is not the
# Attribute class, it's the actual object).
if hasattr(attr, "_astropy_repr_in_frame"):
attrstr = attr._astropy_repr_in_frame()
else:
attrstr = str(attr)
attr_strs.append("{attribute_name}={attrstr}".format(
attribute_name=attribute_name,
attrstr=attrstr))
return ', '.join(attr_strs)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
new = super().__new__(self.__class__)
if hasattr(self, '_representation'):
new._representation = self._representation.copy()
new._attr_names_with_defaults = self._attr_names_with_defaults.copy()
for attr in self.frame_attributes:
_attr = '_' + attr
if attr in self._attr_names_with_defaults:
setattr(new, _attr, getattr(self, _attr))
else:
value = getattr(self, _attr)
if getattr(value, 'size', 1) > 1:
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, _attr, value)
if self.has_data:
new._data = apply_method(self.data)
else:
new._data = None
shapes = [getattr(new, '_' + attr).shape
for attr in new.frame_attributes
if (attr not in new._attr_names_with_defaults and
getattr(getattr(new, '_' + attr), 'size', 1) > 1)]
if shapes:
new._no_data_shape = (check_broadcast(*shapes)
if len(shapes) > 1 else shapes[0])
else:
new._no_data_shape = ()
return new
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
dir_values = set(self.representation_component_names)
dir_values |= set(self.get_representation_component_names('s'))
return dir_values
def __getattr__(self, attr):
"""
Allow access to attributes on the representation and differential as
found via ``self.get_representation_component_names``.
TODO: We should handle dynamic representation transforms here (e.g.,
`.cylindrical`) instead of defining properties as below.
"""
# attr == '_representation' is likely from the hasattr() test in the
# representation property which is used for
# self.representation_component_names.
#
# Prevent infinite recursion here.
if attr.startswith('_'):
return self.__getattribute__(attr) # Raise AttributeError.
repr_names = self.representation_component_names
if attr in repr_names:
if self._data is None:
self.data # this raises the "no data" error by design - doing it
# this way means we don't have to replicate the error message here
rep = self.represent_as(self.representation_type,
in_frame_units=True)
val = getattr(rep, repr_names[attr])
return val
diff_names = self.get_representation_component_names('s')
if attr in diff_names:
if self._data is None:
self.data # see above.
# TODO: this doesn't work for the case when there is only
# unitspherical information. The differential_type gets set to the
# default_differential, which expects full information, so the
# units don't work out
rep = self.represent_as(in_frame_units=True,
**self.get_representation_cls(None))
val = getattr(rep.differentials['s'], diff_names[attr])
return val
return self.__getattribute__(attr) # Raise AttributeError.
def __setattr__(self, attr, value):
# Don't slow down access of private attributes!
if not attr.startswith('_'):
if hasattr(self, 'representation_info'):
repr_attr_names = set()
for representation_attr in self.representation_info.values():
repr_attr_names.update(representation_attr['names'])
if attr in repr_attr_names:
raise AttributeError(
'Cannot set any frame attribute {0}'.format(attr))
super().__setattr__(attr, value)
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
from .angle_utilities import angular_separation
from .angles import Angle
self_unit_sph = self.represent_as(r.UnitSphericalRepresentation)
other_transformed = other.transform_to(self)
other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation)
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(self_unit_sph.lon, self_unit_sph.lat,
other_unit_sph.lon, other_unit_sph.lat)
return Angle(sep, unit=u.degree)
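    # Example sketch (hedged): two ICRS positions one degree apart along the
    # equator should give a one-degree separation:
    #
    #     ICRS(0*u.deg, 0*u.deg).separation(ICRS(1*u.deg, 0*u.deg))
    #     # -> <Angle 1. deg>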
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
from .distances import Distance
if issubclass(self.data.__class__, r.UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
# do this first just in case the conversion somehow creates a distance
other_in_self_system = other.transform_to(self)
        if issubclass(other_in_self_system.data.__class__,
                      r.UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
# drop the differentials to ensure they don't do anything odd in the
# subtraction
self_car = self.data.without_differentials().represent_as(r.CartesianRepresentation)
other_car = other_in_self_system.data.without_differentials().represent_as(r.CartesianRepresentation)
return Distance((self_car - other_car).norm())
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('cartesian', in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('spherical', in_frame_units=True)
@property
def sphericalcoslat(self):
"""
Shorthand for a spherical representation of the positional data and a
`SphericalCosLatDifferential` for the velocity data in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('spherical', 'sphericalcoslat',
in_frame_units=True)
@property
def velocity(self):
"""
Shorthand for retrieving the Cartesian space-motion as a
`CartesianDifferential` object. This is equivalent to calling
``self.cartesian.differentials['s']``.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
try:
v = self.cartesian.differentials['s']
except Exception as e:
raise ValueError('Could not retrieve a Cartesian velocity. Your '
'frame must include velocity information for this '
'to work.')
return v
@property
def proper_motion(self):
"""
Shorthand for the two-dimensional proper motion as a
`~astropy.units.Quantity` object with angular velocity units. In the
returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude
dimension so that ``.proper_motion[0]`` is the longitudinal proper
motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper
motion already includes the cos(latitude) term.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
sph = self.represent_as('spherical', 'sphericalcoslat',
in_frame_units=True)
pm_lon = sph.differentials['s'].d_lon_coslat
pm_lat = sph.differentials['s'].d_lat
return np.stack((pm_lon.value,
pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit
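    # Example sketch (hedged; the ICRS proper-motion component names are
    # ``pm_ra_cosdec`` and ``pm_dec``):
    #
    #     c = ICRS(ra=1*u.deg, dec=2*u.deg,
    #              pm_ra_cosdec=3*u.mas/u.yr, pm_dec=4*u.mas/u.yr)
    #     c.proper_motion   # -> <Quantity [3., 4.] mas / yr>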
@property
def radial_velocity(self):
"""
Shorthand for the radial or line-of-sight velocity as a
`~astropy.units.Quantity` object.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
sph = self.represent_as('spherical', in_frame_units=True)
return sph.differentials['s'].d_distance
class GenericFrame(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
self.frame_attributes = OrderedDict()
for name, default in frame_attrs.items():
self.frame_attributes[name] = Attribute(default)
setattr(self, '_' + name, default)
super().__init__(None)
def __getattr__(self, name):
if '_' + name in self.__dict__:
return getattr(self, '_' + name)
else:
raise AttributeError('no {0}'.format(name))
def __setattr__(self, name, value):
if name in self.get_frame_attr_names():
raise AttributeError("can't set frame attribute '{0}'".format(name))
else:
super().__setattr__(name, value)
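# Usage sketch for GenericFrame (hedged; ``Time`` import assumed):
#
#     from astropy.time import Time
#     g = GenericFrame({'obstime': Time('J2010')})
#     g.obstime        # -> the Time passed in
#     g.obstime = ...  # raises AttributeError: frame attributes are read-only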
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions implementing some of the
algorithms contained within Jean Meeus, 'Astronomical Algorithms',
second edition, 1998, Willmann-Bell.
"""
import numpy as np
from numpy.polynomial.polynomial import polyval
from astropy import units as u
from astropy import _erfa as erfa
from . import ICRS, SkyCoord, GeocentricTrueEcliptic
from .builtin_frames.utils import get_jd12
__all__ = ["calc_moon"]
# Meeus 1998: table 47.A
# D M M' F l r
_MOON_L_R = (
(0, 0, 1, 0, 6288774, -20905355),
(2, 0, -1, 0, 1274027, -3699111),
(2, 0, 0, 0, 658314, -2955968),
(0, 0, 2, 0, 213618, -569925),
(0, 1, 0, 0, -185116, 48888),
(0, 0, 0, 2, -114332, -3149),
(2, 0, -2, 0, 58793, 246158),
(2, -1, -1, 0, 57066, -152138),
(2, 0, 1, 0, 53322, -170733),
(2, -1, 0, 0, 45758, -204586),
(0, 1, -1, 0, -40923, -129620),
(1, 0, 0, 0, -34720, 108743),
(0, 1, 1, 0, -30383, 104755),
(2, 0, 0, -2, 15327, 10321),
(0, 0, 1, 2, -12528, 0),
(0, 0, 1, -2, 10980, 79661),
(4, 0, -1, 0, 10675, -34782),
(0, 0, 3, 0, 10034, -23210),
(4, 0, -2, 0, 8548, -21636),
(2, 1, -1, 0, -7888, 24208),
(2, 1, 0, 0, -6766, 30824),
(1, 0, -1, 0, -5163, -8379),
(1, 1, 0, 0, 4987, -16675),
(2, -1, 1, 0, 4036, -12831),
(2, 0, 2, 0, 3994, -10445),
(4, 0, 0, 0, 3861, -11650),
(2, 0, -3, 0, 3665, 14403),
(0, 1, -2, 0, -2689, -7003),
(2, 0, -1, 2, -2602, 0),
(2, -1, -2, 0, 2390, 10056),
(1, 0, 1, 0, -2348, 6322),
(2, -2, 0, 0, 2236, -9884),
(0, 1, 2, 0, -2120, 5751),
(0, 2, 0, 0, -2069, 0),
(2, -2, -1, 0, 2048, -4950),
(2, 0, 1, -2, -1773, 4130),
(2, 0, 0, 2, -1595, 0),
(4, -1, -1, 0, 1215, -3958),
(0, 0, 2, 2, -1110, 0),
(3, 0, -1, 0, -892, 3258),
(2, 1, 1, 0, -810, 2616),
(4, -1, -2, 0, 759, -1897),
(0, 2, -1, 0, -713, -2117),
(2, 2, -1, 0, -700, 2354),
(2, 1, -2, 0, 691, 0),
(2, -1, 0, -2, 596, 0),
(4, 0, 1, 0, 549, -1423),
(0, 0, 4, 0, 537, -1117),
(4, -1, 0, 0, 520, -1571),
(1, 0, -2, 0, -487, -1739),
(2, 1, 0, -2, -399, 0),
(0, 0, 2, -2, -381, -4421),
(1, 1, 1, 0, 351, 0),
(3, 0, -2, 0, -340, 0),
(4, 0, -3, 0, 330, 0),
(2, -1, 2, 0, 327, 0),
(0, 2, 1, 0, -323, 1165),
(1, 1, -1, 0, 299, 0),
(2, 0, 3, 0, 294, 0),
(2, 0, -1, -2, 0, 8752)
)
# Meeus 1998: table 47.B
# D M M' F b
_MOON_B = (
(0, 0, 0, 1, 5128122),
(0, 0, 1, 1, 280602),
(0, 0, 1, -1, 277693),
(2, 0, 0, -1, 173237),
(2, 0, -1, 1, 55413),
(2, 0, -1, -1, 46271),
(2, 0, 0, 1, 32573),
(0, 0, 2, 1, 17198),
(2, 0, 1, -1, 9266),
(0, 0, 2, -1, 8822),
(2, -1, 0, -1, 8216),
(2, 0, -2, -1, 4324),
(2, 0, 1, 1, 4200),
(2, 1, 0, -1, -3359),
(2, -1, -1, 1, 2463),
(2, -1, 0, 1, 2211),
(2, -1, -1, -1, 2065),
(0, 1, -1, -1, -1870),
(4, 0, -1, -1, 1828),
(0, 1, 0, 1, -1794),
(0, 0, 0, 3, -1749),
(0, 1, -1, 1, -1565),
(1, 0, 0, 1, -1491),
(0, 1, 1, 1, -1475),
(0, 1, 1, -1, -1410),
(0, 1, 0, -1, -1344),
(1, 0, 0, -1, -1335),
(0, 0, 3, 1, 1107),
(4, 0, 0, -1, 1021),
(4, 0, -1, 1, 833),
# second column
(0, 0, 1, -3, 777),
(4, 0, -2, 1, 671),
(2, 0, 0, -3, 607),
(2, 0, 2, -1, 596),
(2, -1, 1, -1, 491),
(2, 0, -2, 1, -451),
(0, 0, 3, -1, 439),
(2, 0, 2, 1, 422),
(2, 0, -3, -1, 421),
(2, 1, -1, 1, -366),
(2, 1, 0, 1, -351),
(4, 0, 0, 1, 331),
(2, -1, 1, 1, 315),
(2, -2, 0, -1, 302),
(0, 0, 1, 3, -283),
(2, 1, 1, -1, -229),
(1, 1, 0, -1, 223),
(1, 1, 0, 1, 223),
(0, 1, -2, -1, -220),
(2, 1, -1, -1, -220),
(1, 0, 1, 1, -185),
(2, -1, -2, -1, 181),
(0, 1, 2, 1, -177),
(4, 0, -2, -1, 176),
(4, -1, -1, -1, 166),
(1, 0, 1, -1, -164),
(4, 0, 1, -1, 132),
(1, 0, -1, -1, -119),
(4, -1, 0, -1, 115),
(2, -2, 0, 1, 107)
)
"""
Coefficients of polynomials for various terms:
Lc : Mean longitude of Moon, w.r.t mean Equinox of date
D : Mean elongation of the Moon
M: Sun's mean anomaly
Mc : Moon's mean anomaly
F : Moon's argument of latitude (mean distance of Moon from its ascending node).
"""
_coLc = (2.18316448e+02, 4.81267881e+05, -1.57860000e-03,
1.85583502e-06, -1.53388349e-08)
_coD = (2.97850192e+02, 4.45267111e+05, -1.88190000e-03,
1.83194472e-06, -8.84447000e-09)
_coM = (3.57529109e+02, 3.59990503e+04, -1.53600000e-04,
4.08329931e-08)
_coMc = (1.34963396e+02, 4.77198868e+05, 8.74140000e-03,
1.43474081e-05, -6.79717238e-08)
_coF = (9.32720950e+01, 4.83202018e+05, -3.65390000e-03,
-2.83607487e-07, 1.15833246e-09)
_coA1 = (119.75, 131.849)
_coA2 = (53.09, 479264.290)
_coA3 = (313.45, 481266.484)
_coE = (1.0, -0.002516, -0.0000074)
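# Evaluation sketch: each ``_co*`` tuple holds polynomial coefficients in
# ascending powers of T (Julian centuries since J2000.0), so e.g. the Moon's
# mean longitude in degrees is
#
#     Lc = polyval(T, _coLc)
#        = 218.316... + 481267.881...*T - 1.5786e-3*T**2 + ...
#
# as used in ``calc_moon`` below.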
def calc_moon(t):
"""
    Lunar position model ELP2000-82 of Chapront-Touze' and Chapront (1983,
    Astronomy & Astrophysics, 124, 50), in the simplified form given by
    Jean Meeus, Astronomical Algorithms, second edition, 1998,
    Willmann-Bell. Meeus claims an approximate accuracy of 10" in
    longitude and 4" in latitude, with no specified time range.

    Tests against JPL ephemerides show an accuracy of 10 arcseconds and
    50 km over the date range CE 1950-2050.
Parameters
    ----------
t : `~astropy.time.Time`
Time of observation.
Returns
    -------
skycoord : `~astropy.coordinates.SkyCoord`
ICRS Coordinate for the body
"""
# number of centuries since J2000.0.
# This should strictly speaking be in Ephemeris Time, but TDB or TT
# will introduce error smaller than intrinsic accuracy of algorithm.
T = (t.tdb.jyear-2000.0)/100.
# constants that are needed for all calculations
Lc = u.Quantity(polyval(T, _coLc), u.deg)
D = u.Quantity(polyval(T, _coD), u.deg)
M = u.Quantity(polyval(T, _coM), u.deg)
Mc = u.Quantity(polyval(T, _coMc), u.deg)
F = u.Quantity(polyval(T, _coF), u.deg)
A1 = u.Quantity(polyval(T, _coA1), u.deg)
A2 = u.Quantity(polyval(T, _coA2), u.deg)
A3 = u.Quantity(polyval(T, _coA3), u.deg)
E = polyval(T, _coE)
suml = sumr = 0.0
for DNum, MNum, McNum, FNum, LFac, RFac in _MOON_L_R:
corr = E ** abs(MNum)
suml += LFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum)
sumr += RFac*corr*np.cos(D*DNum+M*MNum+Mc*McNum+F*FNum)
sumb = 0.0
for DNum, MNum, McNum, FNum, BFac in _MOON_B:
corr = E ** abs(MNum)
sumb += BFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum)
suml += (3958*np.sin(A1) + 1962*np.sin(Lc-F) + 318*np.sin(A2))
sumb += (-2235*np.sin(Lc) + 382*np.sin(A3) + 175*np.sin(A1-F) +
175*np.sin(A1+F) + 127*np.sin(Lc-Mc) - 115*np.sin(Lc+Mc))
# ensure units
suml = suml*u.microdegree
sumb = sumb*u.microdegree
# nutation of longitude
jd1, jd2 = get_jd12(t, 'tt')
nut, _ = erfa.nut06a(jd1, jd2)
nut = nut*u.rad
# calculate ecliptic coordinates
lon = Lc + suml + nut
lat = sumb
dist = (385000.56+sumr/1000)*u.km
# Meeus algorithm gives GeocentricTrueEcliptic coordinates
ecliptic_coo = GeocentricTrueEcliptic(lon, lat, distance=dist,
obstime=t, equinox=t)
return SkyCoord(ecliptic_coo.transform_to(ICRS))
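# Usage sketch (hedged; output truncated and approximate):
#
#     >>> from astropy.time import Time
#     >>> calc_moon(Time('2015-06-01'))    # doctest: +SKIP
#     <SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, km) ...>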
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from warnings import warn
import collections
import socket
import json
import urllib.request
import urllib.error
import urllib.parse
import numpy as np
from astropy import units as u
from astropy import constants as consts
from astropy.units.quantity import QuantityInfoBase
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Longitude, Latitude
from .representation import CartesianRepresentation, CartesianDifferential
from .errors import UnknownSiteException
from astropy.utils import data, deprecated
from astropy import _erfa as erfa
__all__ = ['EarthLocation']
GeodeticLocation = collections.namedtuple('GeodeticLocation', ['lon', 'lat', 'height'])
# Available ellipsoids (defined in erfam.h, with numbers exposed in erfa).
ELLIPSOIDS = ('WGS84', 'GRS80', 'WGS72')
OMEGA_EARTH = u.Quantity(7.292115855306589e-5, 1./u.s)
"""
Rotational velocity of Earth. In UT1 seconds, this would be 2 pi / (24 * 3600),
but we need the value in SI seconds.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth Seidelmann (1992),
University Science Books.
"""
def _check_ellipsoid(ellipsoid=None, default='WGS84'):
if ellipsoid is None:
ellipsoid = default
if ellipsoid not in ELLIPSOIDS:
raise ValueError('Ellipsoid {0} not among known ones ({1})'
.format(ellipsoid, ELLIPSOIDS))
return ellipsoid
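# Behavior sketch: _check_ellipsoid('GRS80') returns 'GRS80';
# _check_ellipsoid(None) falls back to the default ('WGS84'); an unknown
# name such as _check_ellipsoid('bogus') raises ValueError.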
def _get_json_result(url, err_str, use_google):
# need to do this here to prevent a series of complicated circular imports
from .name_resolve import NameResolveError
try:
# Retrieve JSON response from Google maps API
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = json.loads(resp.read().decode('utf8'))
except urllib.error.URLError as e:
# This catches a timeout error, see:
# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
if isinstance(e.reason, socket.timeout):
raise NameResolveError(err_str.format(msg="connection timed out"))
else:
raise NameResolveError(err_str.format(msg=e.reason))
except socket.timeout:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
raise NameResolveError(err_str.format(msg="connection timed out"))
if use_google:
results = resp_data.get('results', [])
if resp_data.get('status', None) != 'OK':
raise NameResolveError(err_str.format(msg="unknown failure with "
"Google API"))
else: # OpenStreetMap returns a list
results = resp_data
if not results:
raise NameResolveError(err_str.format(msg="no results returned"))
return results
class EarthLocationInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('x', 'y', 'z', 'ellipsoid')
def _construct_from_dict(self, map):
# Need to pop ellipsoid off and update post-instantiation. This is
# on the to-fix list in #4261.
ellipsoid = map.pop('ellipsoid')
out = self._parent_cls(**map)
out.ellipsoid = ellipsoid
return out
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new EarthLocation instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : EarthLocation (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Very similar to QuantityInfo.new_like, but the creation of the
        # map is different enough that this needs its own routine.
# Get merged info attributes shape, dtype, format, description.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# The above raises an error if the dtypes do not match, but returns
# just the string representation, which is not useful, so remove.
attrs.pop('dtype')
# Make empty EarthLocation using the dtype and unit of the last column.
# Use zeros so we do not get problems for possible conversion to
# geodetic coordinates.
shape = (length,) + attrs.pop('shape')
data = u.Quantity(np.zeros(shape=shape, dtype=cols[0].dtype),
unit=cols[0].unit, copy=False)
# Get arguments needed to reconstruct class
map = {key: (data[key] if key in 'xyz' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
When using the geodetic forms, Longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = 'WGS84'
_location_dtype = np.dtype({'names': ['x', 'y', 'z'],
'formats': [np.float64]*3})
_array_dtype = np.dtype((np.float64, (3,)))
info = EarthLocationInfo()
def __new__(cls, *args, **kwargs):
# TODO: needs copy argument and better dealing with inputs.
if (len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], EarthLocation)):
return args[0].copy()
try:
self = cls.from_geocentric(*args, **kwargs)
except (u.UnitsError, TypeError) as exc_geocentric:
try:
self = cls.from_geodetic(*args, **kwargs)
except Exception as exc_geodetic:
raise TypeError('Coordinates could not be parsed as either '
'geocentric or geodetic, with respective '
'exceptions "{0}" and "{1}"'
.format(exc_geocentric, exc_geodetic))
return self
@classmethod
def from_geocentric(cls, x, y, z, unit=None):
"""
Location on Earth, initialized from geocentric coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array-like
Cartesian coordinates. If not quantities, ``unit`` should be given.
unit : `~astropy.units.UnitBase` object or None
Physical unit of the coordinate values. If ``x``, ``y``, and/or
``z`` are quantities, they will be converted to this unit.
Raises
------
astropy.units.UnitsError
If the units on ``x``, ``y``, and ``z`` do not match or an invalid
unit is given.
ValueError
If the shapes of ``x``, ``y``, and ``z`` do not match.
TypeError
If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
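
        Examples
        --------
        A minimal, illustrative construction (the values are arbitrary)::

            >>> from astropy import units as u
            >>> from astropy.coordinates import EarthLocation
            >>> loc = EarthLocation.from_geocentric(1., 2., 3., unit=u.km)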
"""
if unit is None:
try:
unit = x.unit
except AttributeError:
raise TypeError("Geocentric coordinates should be Quantities "
"unless an explicit unit is given.")
else:
unit = u.Unit(unit)
if unit.physical_type != 'length':
raise u.UnitsError("Geocentric coordinates should be in "
"units of length.")
try:
x = u.Quantity(x, unit, copy=False)
y = u.Quantity(y, unit, copy=False)
z = u.Quantity(z, unit, copy=False)
except u.UnitsError:
raise u.UnitsError("Geocentric coordinate units should all be "
"consistent.")
x, y, z = np.broadcast_arrays(x, y, z)
struc = np.empty(x.shape, cls._location_dtype)
struc['x'], struc['y'], struc['z'] = x, y, z
return super().__new__(cls, struc, unit, copy=False)
@classmethod
def from_geodetic(cls, lon, lat, height=0., ellipsoid=None):
"""
Location on Earth, initialized from geodetic coordinates.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth East longitude. Can be anything that initialises an
`~astropy.coordinates.Angle` object (if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
height : `~astropy.units.Quantity` or float, optional
Height above reference ellipsoid (if float, in meters; default: 0).
ellipsoid : str, optional
Name of the reference ellipsoid to use (default: 'WGS84').
Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'.
Raises
------
astropy.units.UnitsError
If the units on ``lon`` and ``lat`` are inconsistent with angular
ones, or that on ``height`` with a length.
ValueError
If ``lon``, ``lat``, and ``height`` do not have the same shape, or
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geocentric coordinates, the ERFA routine
``gd2gc`` is used. See https://github.com/liberfa/erfa
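
        Examples
        --------
        An illustrative call (roughly the Greenwich Royal Observatory;
        plain floats are interpreted as degrees, and the default height
        unit is meters)::

            >>> from astropy.coordinates import EarthLocation
            >>> greenwich = EarthLocation.from_geodetic(0.0, 51.48, height=46.)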
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
lon = Longitude(lon, u.degree, wrap_angle=180*u.degree, copy=False)
lat = Latitude(lat, u.degree, copy=False)
# don't convert to m by default, so we can use the height unit below.
if not isinstance(height, u.Quantity):
height = u.Quantity(height, u.m, copy=False)
# get geocentric coordinates. Have to give one-dimensional array.
xyz = erfa.gd2gc(getattr(erfa, ellipsoid),
lon.to_value(u.radian),
lat.to_value(u.radian),
height.to_value(u.m))
self = xyz.ravel().view(cls._location_dtype,
cls).reshape(xyz.shape[:-1])
self._unit = u.meter
self._ellipsoid = ellipsoid
return self.to(height.unit)
@classmethod
def of_site(cls, site_name):
"""
Return an object of this class for a known observatory/site by name.
This is intended as a quick convenience function to get basic site
information, not a fully-featured exhaustive registry of observatories
and all their properties.
Additional information about the site is stored in the ``.info.meta``
dictionary of sites obtained using this method (see the examples below).
.. note::
When this function is called, it will attempt to download site
information from the astropy data server. If you would like a site
to be added, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
If a site cannot be found in the registry (i.e., an internet
        connection is not available), it will fall back on a built-in list.
In the future, this bundled list might include a version-controlled
list of canonical observatories extracted from the online version,
but it currently only contains the Greenwich Royal Observatory as an
example case.
Parameters
----------
site_name : str
Name of the observatory (case-insensitive).
Returns
-------
site : This class (a `~astropy.coordinates.EarthLocation` or subclass)
The location of the observatory.
Examples
--------
>>> from astropy.coordinates import EarthLocation
>>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA
>>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP
GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
>>> keck.info.meta # doctest: +REMOTE_DATA
{'source': 'IRAF Observatory Database', 'timezone': 'US/Aleutian'}
See Also
--------
get_site_names : the list of sites that this function can access
"""
registry = cls._get_site_registry()
try:
el = registry[site_name]
except UnknownSiteException as e:
raise UnknownSiteException(e.site, 'EarthLocation.get_site_names', close_names=e.close_names)
if cls is el.__class__:
return el
else:
newel = cls.from_geodetic(*el.to_geodetic())
newel.info.name = el.info.name
return newel
@classmethod
def of_address(cls, address, get_height=False, google_api_key=None):
"""
Return an object of this class for a given address by querying either
the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
API [2]_, which requires a specified API key.
This is intended as a quick convenience function to get easy access to
locations. If you need to specify a precise location, you should use the
initializer directly and pass in a longitude, latitude, and elevation.
In the background, this just issues a web query to either of
the APIs noted above. This is not meant to be abused! Both
OpenStreetMap and Google use IP-based query limiting and will ban your
IP if you send more than a few thousand queries per hour [2]_.
.. warning::
If the query returns more than one location (e.g., searching on
``address='springfield'``), this function will use the **first**
returned location.
Parameters
----------
address : str
The address to get the location for. As per the Google maps API,
this can be a fully specified street address (e.g., 123 Main St.,
            New York, NY) or a city name (e.g., Danbury, CT), etc.
get_height : bool (optional)
This only works when using the Google API! See the ``google_api_key``
block below. Use the retrieved location to perform a second query to
the Google maps elevation API to retrieve the height of the input
address [3]_.
google_api_key : str (optional)
A Google API key with the Geocoding API and (optionally) the
elevation API enabled. See [4]_ for more information.
Returns
-------
location : This class (a `~astropy.coordinates.EarthLocation` or subclass)
The location of the input address.
References
----------
.. [1] https://nominatim.openstreetmap.org/
.. [2] https://developers.google.com/maps/documentation/geocoding/start
.. [3] https://developers.google.com/maps/documentation/elevation/
.. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
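
        Examples
        --------
        A sketch of typical use (this performs a web query, so it is
        skipped in automated testing; the address is illustrative)::

            >>> loc = EarthLocation.of_address('Greenwich, London')  # doctest: +SKIP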
"""
use_google = google_api_key is not None
# Fail fast if invalid options are passed:
if not use_google and get_height:
raise ValueError('Currently, `get_height` only works when using '
'the Google geocoding API, which requires passing '
'a Google API key with `google_api_key`. See: '
'https://developers.google.com/maps/documentation/geocoding/get-api-key '
'for information on obtaining an API key.')
if use_google: # Google
pars = urllib.parse.urlencode({'address': address,
'key': google_api_key})
geo_url = ("https://maps.googleapis.com/maps/api/geocode/json?{0}"
.format(pars))
else: # OpenStreetMap
pars = urllib.parse.urlencode({'q': address,
'format': 'json'})
geo_url = ("https://nominatim.openstreetmap.org/search?{0}"
.format(pars))
# get longitude and latitude location
err_str = ("Unable to retrieve coordinates for address '{address}'; "
"{{msg}}".format(address=address))
geo_result = _get_json_result(geo_url, err_str=err_str,
use_google=use_google)
if use_google:
loc = geo_result[0]['geometry']['location']
lat = loc['lat']
lon = loc['lng']
else:
loc = geo_result[0]
lat = float(loc['lat']) # strings are returned by OpenStreetMap
lon = float(loc['lon'])
if get_height:
pars = {'locations': '{lat:.8f},{lng:.8f}'.format(lat=lat, lng=lon),
'key': google_api_key}
pars = urllib.parse.urlencode(pars)
ele_url = ("https://maps.googleapis.com/maps/api/elevation/json?{0}"
.format(pars))
err_str = ("Unable to retrieve elevation for address '{address}'; "
"{{msg}}".format(address=address))
ele_result = _get_json_result(ele_url, err_str=err_str,
use_google=use_google)
height = ele_result[0]['elevation']*u.meter
else:
height = 0.
return cls.from_geodetic(lon=lon*u.deg, lat=lat*u.deg, height=height)
@classmethod
def get_site_names(cls):
"""
Get list of names of observatories for use with
`~astropy.coordinates.EarthLocation.of_site`.
.. note::
When this function is called, it will first attempt to
download site information from the astropy data server. If it
cannot (i.e., an internet connection is not available), it will fall
back on the list included with astropy (which is a limited and dated
set of sites). If you think a site should be added, issue a pull
request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
Returns
-------
names : list of str
List of valid observatory names
See Also
--------
of_site : Gets the actual location object for one of the sites names
this returns.
"""
return cls._get_site_registry().names
@classmethod
def _get_site_registry(cls, force_download=False, force_builtin=False):
"""
Gets the site registry. The first time this either downloads or loads
from the data file packaged with astropy. Subsequent calls will use the
cached version unless explicitly overridden.
Parameters
----------
force_download : bool or str
If not False, force replacement of the cached registry with a
downloaded version. If a str, that will be used as the URL to
download from (if just True, the default URL will be used).
force_builtin : bool
If True, load from the data file bundled with astropy and set the
cache to that.
        Returns
-------
reg : astropy.coordinates.sites.SiteRegistry
"""
if force_builtin and force_download:
raise ValueError('Cannot have both force_builtin and force_download True')
if force_builtin:
reg = cls._site_registry = get_builtin_sites()
else:
reg = getattr(cls, '_site_registry', None)
if force_download or not reg:
try:
if isinstance(force_download, str):
reg = get_downloaded_sites(force_download)
else:
reg = get_downloaded_sites()
except OSError:
if force_download:
raise
msg = ('Could not access the online site list. Falling '
'back on the built-in version, which is rather '
'limited. If you want to retry the download, do '
'{0}._get_site_registry(force_download=True)')
warn(AstropyUserWarning(msg.format(cls.__name__)))
reg = get_builtin_sites()
cls._site_registry = reg
return reg
@property
def ellipsoid(self):
"""The default ellipsoid used to convert to geodetic coordinates."""
return self._ellipsoid
@ellipsoid.setter
def ellipsoid(self, ellipsoid):
self._ellipsoid = _check_ellipsoid(ellipsoid)
@property
def geodetic(self):
"""Convert to geodetic coordinates for the default ellipsoid."""
return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
"""Convert to geodetic coordinates.
Parameters
----------
ellipsoid : str, optional
Reference ellipsoid to use. Default is the one the coordinates
were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'
Returns
-------
(lon, lat, height) : tuple
The tuple contains instances of `~astropy.coordinates.Longitude`,
`~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`
Raises
------
ValueError
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geodetic coordinates, the ERFA routine
``gc2gd`` is used. See https://github.com/liberfa/erfa
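
        Examples
        --------
        A round trip through geodetic coordinates (illustrative values)::

            >>> from astropy import units as u
            >>> from astropy.coordinates import EarthLocation
            >>> loc = EarthLocation.from_geodetic(10.*u.deg, 45.*u.deg, 300.*u.m)
            >>> lon, lat, height = loc.to_geodetic()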
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
self_array = self.to(u.meter).view(self._array_dtype, np.ndarray)
lon, lat, height = erfa.gc2gd(getattr(erfa, ellipsoid), self_array)
return GeodeticLocation(
Longitude(lon * u.radian, u.degree,
wrap_angle=180.*u.degree, copy=False),
Latitude(lat * u.radian, u.degree, copy=False),
u.Quantity(height * u.meter, self.unit, copy=False))
@property
@deprecated('2.0', alternative='`lon`', obj_type='property')
def longitude(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
def lon(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
@deprecated('2.0', alternative='`lat`', obj_type='property')
def latitude(self):
"""Latitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def lat(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def height(self):
"""Height of the location, for the default ellipsoid."""
return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities"""
return self.to_geocentric()
def to_geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities"""
return (self.x, self.y, self.z)
def get_itrs(self, obstime=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.size > 1:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
    itrs = property(get_itrs, doc="""An `~astropy.coordinates.ITRS` object
                                     for the location of this object at the
                                     default ``obstime``.""")
def get_gcrs(self, obstime):
"""GCRS position with velocity at ``obstime`` as a GCRS coordinate.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
        -------
gcrs : `~astropy.coordinates.GCRS` instance
With velocity included.
"""
# do this here to prevent a series of complicated circular imports
from .builtin_frames import GCRS
itrs = self.get_itrs(obstime)
# Assume the observatory itself is fixed on the ground.
# We do a direct assignment rather than an update to avoid validation
# and creation of a new object.
zeros = np.broadcast_to(0. * u.km / u.s, (3,) + itrs.shape, subok=True)
itrs.data.differentials['s'] = CartesianDifferential(zeros)
return itrs.transform_to(GCRS(obstime=obstime))
def get_gcrs_posvel(self, obstime):
"""
Calculate the GCRS position and velocity of this object at the
requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
        -------
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
The GCRS position of the object
obsgeovel : `~astropy.coordinates.CartesianRepresentation`
The GCRS velocity of the object
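
        Examples
        --------
        A sketch of typical use (the ITRS->GCRS transform needs IERS data,
        so the call is skipped in automated testing)::

            >>> from astropy.time import Time
            >>> loc = EarthLocation.from_geodetic(0., 51.48, 46.)
            >>> pos, vel = loc.get_gcrs_posvel(Time('2017-01-01'))  # doctest: +SKIP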
"""
# GCRS position
gcrs_data = self.get_gcrs(obstime).data
obsgeopos = gcrs_data.without_differentials()
obsgeovel = gcrs_data.differentials['s'].to_cartesian()
return obsgeopos, obsgeovel
def gravitational_redshift(self, obstime,
bodies=['sun', 'jupiter', 'moon'],
masses={}):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict of str to Quantity, optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
        -------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
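
        Examples
        --------
        A sketch (the result is of order a few m/s; this needs a solar
        system ephemeris, so the call is skipped in automated testing)::

            >>> from astropy.time import Time
            >>> loc = EarthLocation.from_geodetic(0., 51.48, 46.)
            >>> loc.gravitational_redshift(Time('2017-01-01'))  # doctest: +SKIP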
"""
# needs to be here to avoid circular imports
from .solar_system import get_body_barycentric
bodies = list(bodies)
# Ensure earth is included and last in the list.
if 'earth' in bodies:
bodies.remove('earth')
bodies.append('earth')
_masses = {'sun': consts.GM_sun,
'jupiter': consts.GM_jup,
'moon': consts.G * 7.34767309e22*u.kg,
'earth': consts.GM_earth}
_masses.update(masses)
GMs = []
M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
for body in bodies:
try:
GMs.append(_masses[body].to(u.m**3/u.s**2, [M_GM_equivalency]))
except KeyError as exc:
raise KeyError('body "{}" does not have a mass!'.format(body))
except u.UnitsError as exc:
exc.args += ('"masses" argument values must be masses or '
'gravitational parameters',)
raise
positions = [get_body_barycentric(name, obstime) for name in bodies]
# Calculate distances to objects other than earth.
distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
# Append distance from Earth's center for Earth's contribution.
distances.append(CartesianRepresentation(self.geocentric).norm())
# Get redshifts due to all objects.
redshifts = [-GM / consts.c / distance for (GM, distance) in
zip(GMs, distances)]
# Reverse order of summing, to go from small to big, and to get
# "earth" first, which gives m/s as unit.
return sum(redshifts[::-1])
@property
def x(self):
"""The X component of the geocentric coordinates."""
return self['x']
@property
def y(self):
"""The Y component of the geocentric coordinates."""
return self['y']
@property
def z(self):
"""The Z component of the geocentric coordinates."""
return self['z']
def __getitem__(self, item):
result = super().__getitem__(item)
if result.dtype is self.dtype:
return result.view(self.__class__)
else:
return result.view(u.Quantity)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, '_ellipsoid'):
self._ellipsoid = obj._ellipsoid
def __len__(self):
if self.shape == ():
raise IndexError('0-d EarthLocation arrays cannot be indexed')
else:
return super().__len__()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
# Conversion to another unit in both ``to`` and ``to_value`` goes
# via this routine. To make the regular quantity routines work, we
# temporarily turn the structured array into a regular one.
array_view = self.view(self._array_dtype, np.ndarray)
if equivalencies == []:
equivalencies = self._equivalencies
new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
return new_array.view(self.dtype).reshape(self.shape)
# need to do this here at the bottom to avoid circular dependencies
from .sites import get_builtin_sites, get_downloaded_sites
|
32c2d0b2e9df00977ed66752ec60680f1d3a7e3b325f630e2a4f10316f977b2c | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
from collections import namedtuple
import numpy as np
from . import angle_utilities as util
from astropy import units as u
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_14_1, NUMPY_LT_1_14_2
__all__ = ['Angle', 'Latitude', 'Longitude']
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's'))
dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's'))
signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's'))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats::
Angle('10.2345d')
Angle(['10.2345d', '-20d'])
Angle('1:2:30.43 degrees')
Angle('1 2 0 hours')
Angle(np.arange(1, 8), unit=u.deg)
Angle('1°2′3″')
Angle('1d2m3.4s')
Angle('-1h2m3s')
Angle('-1h2.5m')
Angle('-1:2.5', unit=u.deg)
Angle((10, 11, 12), unit='hourangle') # (h, m, s)
Angle((-1, 2, 3), unit=u.deg) # (d, m, s)
Angle(10.2345 * u.deg)
Angle(Angle(10.2345 * u.deg))
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : `~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=None, copy=True):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = util.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, angle_unit)
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif (isiterable(angle) and
not (isinstance(angle, np.ndarray) and
angle.dtype.kind not in 'SUVO')):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return util.hms_to_hours(*angle)
elif unit == u.degree:
return util.dms_to_degrees(*angle)
else:
raise u.UnitsError("Can not parse '{0}' as unit '{1}'"
.format(angle, unit))
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit is u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""
The angle's value in hours, as a named tuple with ``(h, m, s)``
members. (This is a read-only property.)
"""
return hms_tuple(*util.hours_to_hms(self.hourangle))
@property
def dms(self):
"""
The angle's value in degrees, as a named tuple with ``(d, m, s)``
members. (This is a read-only property.)
"""
return dms_tuple(*util.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""
The angle's value in degrees, as a named tuple with ``(sign, d, m, s)``
members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``. (This is a read-only property.)
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(np.sign(self.degree),
*util.degrees_to_dms(np.abs(self.degree)))
def to_string(self, unit=None, decimal=False, sep='fromunit',
precision=None, alwayssign=False, pad=False,
fields=3, format=None):
""" A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `True`, a decimal representation will be used, otherwise
the returned string will be in sexagesimal form.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
            ``sep='-:'`` would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
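
        Examples
        --------
        For instance (illustrative; the expected strings are shown as
        comments rather than checked outputs)::

            >>> from astropy.coordinates import Angle
            >>> from astropy import units as u
            >>> a = Angle(10.2345 * u.deg)
            >>> s1 = a.to_string(sep=':')       # e.g. '10:14:04.2'
            >>> s2 = a.to_string(decimal=True)  # e.g. '10.2345'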
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
None: {
u.degree: 'dms',
u.hourangle: 'hms'},
'latex': {
u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'],
u.hourangle: [r'^\mathrm{h}', r'^\mathrm{m}', r'^\mathrm{s}']},
'unicode': {
u.degree: '°′″',
u.hourangle: 'ʰᵐˢ'}
}
if sep == 'fromunit':
if format not in separators:
raise ValueError("Unknown format '{0}'".format(format))
seps = separators[format]
if unit in seps:
sep = seps[unit]
# Create an iterator so we can format each element of what
# might be an array.
if unit is u.degree:
if decimal:
values = self.degree
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{0:g}'.format
else:
if sep == 'fromunit':
sep = 'dms'
values = self.degree
func = lambda x: util.degrees_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit is u.hourangle:
if decimal:
values = self.hour
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{0:g}'.format
else:
if sep == 'fromunit':
sep = 'hms'
values = self.hour
func = lambda x: util.hours_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit.is_equivalent(u.radian):
if decimal:
values = self.to_value(unit)
if precision is not None:
func = ("{0:1." + str(precision) + "f}").format
else:
func = "{0:g}".format
elif sep == 'fromunit':
values = self.to_value(unit)
unit_string = unit.to_string(format=format)
if format == 'latex':
unit_string = unit_string[1:-1]
if precision is not None:
def plain_unit_format(val):
return ("{0:0." + str(precision) + "f}{1}").format(
val, unit_string)
func = plain_unit_format
else:
def plain_unit_format(val):
return "{0:g}{1}".format(val, unit_string)
func = plain_unit_format
else:
raise ValueError(
"'{0}' can not be represented in sexagesimal "
"notation".format(
unit.name))
else:
raise u.UnitsError(
"The unit value provided is not an angular unit.")
def do_format(val):
s = func(float(val))
if alwayssign and not s.startswith('-'):
s = '+' + s
if format == 'latex':
s = '${0}$'.format(s)
return s
format_ufunc = np.vectorize(do_format, otypes=['U'])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `Angle` object is wrapped in
place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : str, `Angle`, angular `~astropy.units.Quantity`
Specifies a single value for the wrap angle. This can be any
object that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `Angle`
Returns
-------
out : Angle or `None`
If ``inplace is False`` (default), return new `Angle` object
with angles wrapped accordingly. Otherwise wrap in place and
return `None`.
"""
wrap_angle = Angle(wrap_angle) # Convert to an Angle
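        # Shift into the range [0, 360) deg relative to wrap_angle, then
        # shift back so the result lies in [wrap_angle - 360 deg, wrap_angle).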
wrapped = np.mod(self - wrap_angle, 360.0 * u.deg) - (360.0 * u.deg - wrap_angle)
if inplace:
self[()] = wrapped
else:
return wrapped
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : str, `Angle`, angular `~astropy.units.Quantity`, `None`
Specifies lower bound for checking. This can be any object
that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : str, `Angle`, angular `~astropy.units.Quantity`, `None`
Specifies upper bound for checking. This can be any object
that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
if NUMPY_LT_1_14_1 or not NUMPY_LT_1_14_2:
def formatter(x):
return x.to_string(format=format)
else:
            # In numpy 1.14.1, array2string formatters get passed plain numpy scalars instead
# of subclass array scalars, so we need to recreate an array scalar.
def formatter(x):
return self._new_view(x).to_string(format=format)
return np.array2string(self, formatter={'all': formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format='latex')
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, Angle) else obj
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
    angle : array, list, scalar, `~astropy.units.Quantity`, `Angle`
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
        or ``(d, m, s)`` depending on ``unit``. If a string, it will be
        interpreted following the rules described for
        :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : :class:`~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
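
    Examples
    --------
    For instance (illustrative; out-of-range values raise `ValueError`)::

        >>> from astropy.coordinates import Latitude
        >>> from astropy import units as u
        >>> lat = Latitude(45.0 * u.deg)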
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
If not given, the check is done on the object itself"""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
lower = u.degree.to(angles.unit, -90.0)
upper = u.degree.to(angles.unit, 90.0)
if np.any(angles.value < lower) or np.any(angles.value > upper):
raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
'got {0}'.format(angles.to(u.degree)))
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
    angle : array, list, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
        or ``(d, m, s)`` depending on ``unit``. If a string, it will be
        interpreted following the rules described for
        :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : :class:`~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : :class:`~astropy.coordinates.Angle` or equivalent, or None
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
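
    Examples
    --------
    For instance (illustrative)::

        >>> from astropy.coordinates import Longitude
        >>> from astropy import units as u
        >>> lon = Longitude(370.0 * u.deg)  # wraps at the default 360 deg
        >>> lon.degree  # doctest: +FLOAT_CMP
        10.0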
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError("A Longitude angle cannot be created from "
"a Latitude angle.")
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle)
self.wrap_angle = wrap_angle
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_internal()
def _wrap_internal(self):
"""
Wrap the internal values in the Longitude object. Using the
:meth:`~astropy.coordinates.Angle.wrap_at` method causes
recursion.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = self.wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
self_angle = self.value
# Do the wrapping, but only if any angles need to be wrapped
if np.any(self_angle < wrap_angle_floor) or np.any(self_angle >= wrap_angle):
wrapped = np.mod(self_angle - wrap_angle, a360) + wrap_angle_floor
value = u.Quantity(wrapped, self.unit)
super().__setitem__((), value)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value, copy=False)
self._wrap_internal()
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, '_wrap_angle',
self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
|
7584869baa1610710a2814f9a66ed9aa0c6394c5c84d314abdf3f5e65735564e | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
import warnings
# Project
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils import OrderedDescriptor, ShapedLikeNDArray
__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
'EarthLocationAttribute', 'CoordinateAttribute',
'CartesianRepresentationAttribute',
'DifferentialAttribute']
class Attribute(OrderedDescriptor):
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
_class_attribute_ = 'frame_attributes'
_name_attribute_ = 'name'
name = '<unbound>'
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, '_' + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
instance_shape = getattr(instance, 'shape', None)
if instance_shape is not None and (getattr(out, 'size', 1) > 1 and
out.shape != instance_shape):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(np.broadcast_to, shape=instance_shape,
subok=True)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
"attribute {0} should be scalar or have shape {1}, "
"but is has shape {2} and could not be broadcast."
.format(self.name, instance_shape, out.shape))
converted = True
if converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
the Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
'Invalid time input {0}={1!r}\n{2}'.format(self.name,
value, err))
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute='', unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (isinstance(value, list) and len(value) == 3 and
all(v == 0 for v in value) and self.unit is not None):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, 'xyz', value)
if not hasattr(value, 'unit'):
raise TypeError('tried to set a {0} with something that does '
'not have a unit.'
.format(self.__class__.__name__))
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None
If given, specifies the shape the attribute must be
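
    Examples
    --------
    A sketch of use in a frame definition (the frame and attribute names
    are hypothetical)::

        class MyFrame(BaseCoordinateFrame):
            obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)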
"""
def __init__(self, default, secondary_attribute='', unit=None, shape=None):
self.unit = unit
self.shape = shape
default = self.convert_input(default)[0]
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
raise TypeError('QuantityAttributes cannot be None, because None '
'is not a Quantity')
if np.all(value == 0) and self.unit is not None:
return u.Quantity(np.zeros(self.shape), self.unit), True
else:
if not hasattr(value, 'unit') and self.unit != u.dimensionless_unscaled:
raise TypeError('Tried to set a QuantityAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
raise ValueError('The provided value has shape "{0}", but '
'should have shape "{1}"'.format(value.shape,
self.shape))
converted = oldvalue is not value
return value, converted
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Checks that the input is an `~astropy.coordinates.EarthLocation`, or
        something that can be transformed to the ITRS frame (or the special
        value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{0}" was passed into an '
'EarthLocationAttribute, but it does not have '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS)
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
low-level frame class *or* a `~astropy.coordinates.SkyCoord`, but will
always be converted to the low-level frame class when accessed.
Parameters
----------
frame : a coordinate frame class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
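
    Examples
    --------
    A sketch of use in a frame definition (the frame and attribute names
    are hypothetical)::

        class MyFrame(BaseCoordinateFrame):
            origin = CoordinateAttribute(frame=ICRS, default=None)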
"""
def __init__(self, frame, default=None, secondary_attribute=''):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
        Checks that the input is a SkyCoord or frame instance that is (or can
        be transformed to) the target frame (or the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, self._frame):
return value, False
else:
if not hasattr(value, 'transform_to'):
raise ValueError('"{0}" was passed into a '
'CoordinateAttribute, but it does not have '
'"transform_to" method'.format(value))
transformedobj = value.transform_to(self._frame)
if hasattr(transformedobj, 'frame'):
transformedobj = transformedobj.frame
return transformedobj, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None,
secondary_attribute=''):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
        TypeError
If the input is not valid for this attribute.
"""
if not isinstance(value, self.allowed_classes):
raise TypeError('Tried to set a DifferentialAttribute with '
'an unsupported Differential type {0}. Allowed '
'classes are: {1}'
.format(value.__class__,
self.allowed_classes))
return value, True
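# Illustrative sketch (values are only examples): restricting the attribute to
# a single differential class, as the Galactocentric frame does for the solar
# motion::
#
#     >>> from astropy.coordinates import CartesianDifferential
#     >>> import astropy.units as u
#     >>> v_sun = DifferentialAttribute(
#     ...     default=CartesianDifferential([11.1, 232.24, 7.25] * u.km / u.s),
#     ...     allowed_classes=[CartesianDifferential])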
# Backwards-compatibility: these are the only classes that were previously
# released in v1.3
class FrameAttribute(Attribute):
def __init__(self, *args, **kwargs):
warnings.warn("FrameAttribute has been renamed to Attribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class TimeFrameAttribute(TimeAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("TimeFrameAttribute has been renamed to TimeAttribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class QuantityFrameAttribute(QuantityAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("QuantityFrameAttribute has been renamed to "
"QuantityAttribute.", AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class CartesianRepresentationFrameAttribute(CartesianRepresentationAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("CartesianRepresentationFrameAttribute has been renamed "
"to CartesianRepresentationAttribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import CartesianRepresentation, BaseDifferential
|
d615d3ce2b12dade0024a426fe9544f7fc9f77e72231b78b36d1fd608e904839 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Standard library
import re
import textwrap
import warnings
from datetime import datetime
from urllib.request import urlopen
# Third-party
from astropy import time as atime
from astropy.utils.console import color_print, _color_text
from . import get_sun
__all__ = []
class HumanError(ValueError): pass
class CelestialError(ValueError): pass
def get_sign(dt):
"""
"""
if ((int(dt.month) == 12 and int(dt.day) >= 22)or(int(dt.month) == 1 and int(dt.day) <= 19)):
zodiac_sign = "capricorn"
elif ((int(dt.month) == 1 and int(dt.day) >= 20)or(int(dt.month) == 2 and int(dt.day) <= 17)):
zodiac_sign = "aquarius"
elif ((int(dt.month) == 2 and int(dt.day) >= 18)or(int(dt.month) == 3 and int(dt.day) <= 19)):
zodiac_sign = "pisces"
elif ((int(dt.month) == 3 and int(dt.day) >= 20)or(int(dt.month) == 4 and int(dt.day) <= 19)):
zodiac_sign = "aries"
elif ((int(dt.month) == 4 and int(dt.day) >= 20)or(int(dt.month) == 5 and int(dt.day) <= 20)):
zodiac_sign = "taurus"
elif ((int(dt.month) == 5 and int(dt.day) >= 21)or(int(dt.month) == 6 and int(dt.day) <= 20)):
zodiac_sign = "gemini"
elif ((int(dt.month) == 6 and int(dt.day) >= 21)or(int(dt.month) == 7 and int(dt.day) <= 22)):
zodiac_sign = "cancer"
elif ((int(dt.month) == 7 and int(dt.day) >= 23)or(int(dt.month) == 8 and int(dt.day) <= 22)):
zodiac_sign = "leo"
elif ((int(dt.month) == 8 and int(dt.day) >= 23)or(int(dt.month) == 9 and int(dt.day) <= 22)):
zodiac_sign = "virgo"
elif ((int(dt.month) == 9 and int(dt.day) >= 23)or(int(dt.month) == 10 and int(dt.day) <= 22)):
zodiac_sign = "libra"
elif ((int(dt.month) == 10 and int(dt.day) >= 23)or(int(dt.month) == 11 and int(dt.day) <= 21)):
zodiac_sign = "scorpio"
elif ((int(dt.month) == 11 and int(dt.day) >= 22)or(int(dt.month) == 12 and int(dt.day) <= 21)):
zodiac_sign = "sagittarius"
return zodiac_sign
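# Example (illustrative): August 1 satisfies the leo branch above,
# (month == 7 and day >= 23) or (month == 8 and day <= 22)::
#
#     >>> from datetime import datetime
#     >>> get_sign(datetime(2000, 8, 1))
#     'leo'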
_VALID_SIGNS = ["capricorn", "aquarius", "pisces", "aries", "taurus", "gemini",
"cancer", "leo", "virgo", "libra", "scorpio", "sagittarius"]
# Some of the constellation names map to different astrological "sign names".
# Astrologers really need to talk to the IAU...
_CONST_TO_SIGNS = {'capricornus': 'capricorn', 'scorpius': 'scorpio'}
_ZODIAC = ((1900, "rat"), (1901, "ox"), (1902, "tiger"),
(1903, "rabbit"), (1904, "dragon"), (1905, "snake"),
(1906, "horse"), (1907, "goat"), (1908, "monkey"),
(1909, "rooster"), (1910, "dog"), (1911, "pig"))
# https://stackoverflow.com/questions/12791871/chinese-zodiac-python-program
def _get_zodiac(yr):
return _ZODIAC[(yr - _ZODIAC[0][0]) % 12][1]
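# Example (illustrative): the table repeats every 12 years from the 1900 "rat"
# anchor, so (1997 - 1900) % 12 == 1 selects the second entry::
#
#     >>> _get_zodiac(1997)
#     'ox'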
def horoscope(birthday, corrected=True, chinese=False):
"""
Enter your birthday as an `astropy.time.Time` object and
receive a mystical horoscope about things to come.
    Parameters
    ----------
    birthday : `astropy.time.Time` or str
        Your birthday as a `datetime.datetime` or `astropy.time.Time` object
        or "YYYY-MM-DD" string.
corrected : bool
Whether to account for the precession of the Earth instead of using the
ancient Greek dates for the signs. After all, you do want your *real*
horoscope, not a cheap inaccurate approximation, right?
chinese : bool
        Use the Chinese annual zodiac wisdom instead of the Western one.
Returns
-------
Infinite wisdom, condensed into astrologically precise prose.
Notes
-----
This function was implemented on April 1. Take note of that date.
"""
today = datetime.now()
err_msg = "Invalid response from celestial gods (failed to load horoscope)."
special_words = {
'([sS]tar[s^ ]*)': 'yellow',
'([yY]ou[^ ]*)': 'magenta',
'([pP]lay[^ ]*)': 'blue',
'([hH]eart)': 'red',
'([fF]ate)': 'lightgreen',
}
if isinstance(birthday, str):
birthday = datetime.strptime(birthday, '%Y-%m-%d')
if chinese:
from bs4 import BeautifulSoup
# TODO: Make this more accurate by using the actual date, not just year
# Might need third-party tool like https://pypi.python.org/pypi/lunardate
zodiac_sign = _get_zodiac(birthday.year)
url = ('https://www.horoscope.com/us/horoscopes/yearly/'
'{}-chinese-horoscope-{}.aspx'.format(today.year, zodiac_sign))
summ_title_sfx = 'in {}'.format(today.year)
try:
with urlopen(url) as f:
try:
doc = BeautifulSoup(f, 'html.parser')
# TODO: Also include Love, Family & Friends, Work, Money, More?
item = doc.find(id='overview')
desc = item.getText()
except Exception:
raise CelestialError(err_msg)
except Exception:
raise CelestialError(err_msg)
else:
from xml.dom.minidom import parse
birthday = atime.Time(birthday)
if corrected:
with warnings.catch_warnings():
warnings.simplefilter('ignore') # Ignore ErfaWarning
zodiac_sign = get_sun(birthday).get_constellation().lower()
zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign)
if zodiac_sign not in _VALID_SIGNS:
raise HumanError('On your birthday the sun was in {}, which is not '
'a sign of the zodiac. You must not exist. Or '
'maybe you can settle for '
'corrected=False.'.format(zodiac_sign.title()))
else:
zodiac_sign = get_sign(birthday.to_datetime())
url = "http://www.findyourfate.com/rss/dailyhoroscope-feed.php?sign={sign}&id=45"
summ_title_sfx = 'on {}'.format(today.strftime("%Y-%m-%d"))
with urlopen(url.format(sign=zodiac_sign.capitalize())) as f:
try:
doc = parse(f)
item = doc.getElementsByTagName('item')[0]
desc = item.getElementsByTagName('description')[0].childNodes[0].nodeValue
except Exception:
raise CelestialError(err_msg)
print("*"*79)
color_print("Horoscope for {} {}:".format(zodiac_sign.capitalize(), summ_title_sfx),
'green')
print("*"*79)
for block in textwrap.wrap(desc, 79):
split_block = block.split()
for i, word in enumerate(split_block):
for re_word in special_words.keys():
match = re.search(re_word, word)
if match is None:
continue
split_block[i] = _color_text(match.groups()[0], special_words[re_word])
print(" ".join(split_block))
def inject_horoscope():
import astropy
astropy._yourfuture = horoscope
inject_horoscope()
|
8885352bc5619f8d46669ee832c51cc7be9fb0458cfe64455d0ae5afd67e2829 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains standard functions for earth orientation, such as
precession and nutation.
This module is (currently) not intended to be part of the public API, but
is instead primarily for internal use in `coordinates`
"""
import numpy as np
from astropy.time import Time
from astropy import units as u
from .matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
jd1950 = Time('B1950').jd
jd2000 = Time('J2000').jd
_asecperrad = u.radian.to(u.arcsec)
def eccentricity(jd):
"""
Eccentricity of the Earth's orbit at the requested Julian Date.
Parameters
----------
jd : scalar or array-like
Julian date at which to compute the eccentricity
    Returns
-------
eccentricity : scalar or array
The eccentricity (or array of eccentricities)
References
----------
* Explanatory Supplement to the Astronomical Almanac: P. Kenneth
Seidelmann (ed), University Science Books (1992).
"""
T = (jd - jd1950) / 36525.0
    p = (-0.000000126, -0.00004193, 0.01673011)
return np.polyval(p, T)
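# Worked example (rounded, using the polynomial above): at J2000,
# T = (jd2000 - jd1950) / 36525 is about 0.5, so
# eccentricity ≈ 0.01673011 - 0.00004193 * 0.5 - 0.000000126 * 0.25 ≈ 0.016709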
def mean_lon_of_perigee(jd):
"""
Computes the mean longitude of perigee of the Earth's orbit at the
requested Julian Date.
Parameters
----------
jd : scalar or array-like
Julian date at which to compute the mean longitude of perigee
    Returns
-------
mean_lon_of_perigee : scalar or array
Mean longitude of perigee in degrees (or array of mean longitudes)
References
----------
* Explanatory Supplement to the Astronomical Almanac: P. Kenneth
Seidelmann (ed), University Science Books (1992).
"""
T = (jd - jd1950) / 36525.0
p = (0.012, 1.65, 6190.67, 1015489.951)
return np.polyval(p, T) / 3600.
def obliquity(jd, algorithm=2006):
"""
Computes the obliquity of the Earth at the requested Julian Date.
Parameters
----------
jd : scalar or array-like
Julian date at which to compute the obliquity
algorithm : int
        Year of algorithm based on IAU adoption. Can be 2006, 2000 or 1980.
        The 2006 algorithm is mentioned in Circular 179, but the canonical
        reference for the IAU adoption is apparently Hilton et al. 2006. The
        2000 algorithm is composed of the 1980 algorithm with a
        precession-rate correction due to the 2000 precession models, and a
        description of the 1980 algorithm can be found in the Explanatory
        Supplement to the Astronomical Almanac.
    Returns
-------
obliquity : scalar or array
Mean obliquity in degrees (or array of obliquities)
References
----------
    * Hilton, J. et al., 2006, Celest. Mech. Dyn. Astron., 94, 351
* USNO Circular 179
* Explanatory Supplement to the Astronomical Almanac: P. Kenneth
Seidelmann (ed), University Science Books (1992).
"""
T = (jd - jd2000) / 36525.0
if algorithm == 2006:
p = (-0.0000000434, -0.000000576, 0.00200340, -0.0001831, -46.836769, 84381.406)
corr = 0
elif algorithm == 2000:
p = (0.001813, -0.00059, -46.8150, 84381.448)
corr = -0.02524 * T
elif algorithm == 1980:
p = (0.001813, -0.00059, -46.8150, 84381.448)
corr = 0
else:
raise ValueError('invalid algorithm year for computing obliquity')
return (np.polyval(p, T) + corr) / 3600.
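# Worked example (rounded): at J2000 itself T = 0, so the 2006 algorithm
# reduces to its constant term, 84381.406 / 3600 ≈ 23.43928 degrees.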
# TODO: replace this with SOFA equivalent
def precession_matrix_Capitaine(fromepoch, toepoch):
"""
Computes the precession matrix from one Julian epoch to another.
The exact method is based on Capitaine et al. 2003, which should
match the IAU 2006 standard.
Parameters
----------
fromepoch : `~astropy.time.Time`
The epoch to precess from.
toepoch : `~astropy.time.Time`
The epoch to precess to.
Returns
-------
pmatrix : 3x3 array
Precession matrix to get from ``fromepoch`` to ``toepoch``
References
----------
USNO Circular 179
"""
mat_fromto2000 = matrix_transpose(
_precess_from_J2000_Capitaine(fromepoch.jyear))
mat_2000toto = _precess_from_J2000_Capitaine(toepoch.jyear)
return np.dot(mat_2000toto, mat_fromto2000)
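# Illustrative usage (doctest not run here)::
#
#     >>> from astropy.time import Time
#     >>> pmat = precession_matrix_Capitaine(Time('J2000'), Time('J2020'))
#     >>> pmat.shape
#     (3, 3)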
def _precess_from_J2000_Capitaine(epoch):
"""
Computes the precession matrix from J2000 to the given Julian Epoch.
    Expression from Capitaine et al. 2003 as expressed in the USNO
Circular 179. This should match the IAU 2006 standard from SOFA.
Parameters
----------
epoch : scalar
The epoch as a Julian year number (e.g. J2000 is 2000.0)
"""
T = (epoch - 2000.0) / 100.0
# from USNO circular
pzeta = (-0.0000003173, -0.000005971, 0.01801828, 0.2988499, 2306.083227, 2.650545)
pz = (-0.0000002904, -0.000028596, 0.01826837, 1.0927348, 2306.077181, -2.650545)
ptheta = (-0.0000001274, -0.000007089, -0.04182264, -0.4294934, 2004.191903, 0)
zeta = np.polyval(pzeta, T) / 3600.0
z = np.polyval(pz, T) / 3600.0
theta = np.polyval(ptheta, T) / 3600.0
return matrix_product(rotation_matrix(-z, 'z'),
rotation_matrix(theta, 'y'),
rotation_matrix(-zeta, 'z'))
def _precession_matrix_besselian(epoch1, epoch2):
"""
Computes the precession matrix from one Besselian epoch to another using
Newcomb's method.
``epoch1`` and ``epoch2`` are in Besselian year numbers.
"""
# tropical years
t1 = (epoch1 - 1850.0) / 1000.0
t2 = (epoch2 - 1850.0) / 1000.0
dt = t2 - t1
zeta1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
zeta2 = 30.240 - 0.27 * t1
zeta3 = 17.995
pzeta = (zeta3, zeta2, zeta1, 0)
zeta = np.polyval(pzeta, dt) / 3600
z1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1
z2 = 109.480 + 0.39 * t1
z3 = 18.325
pz = (z3, z2, z1, 0)
z = np.polyval(pz, dt) / 3600
theta1 = 20051.12 - 85.29 * t1 - 0.37 * t1 * t1
theta2 = -42.65 - 0.37 * t1
theta3 = -41.8
ptheta = (theta3, theta2, theta1, 0)
theta = np.polyval(ptheta, dt) / 3600
return matrix_product(rotation_matrix(-z, 'z'),
rotation_matrix(theta, 'y'),
rotation_matrix(-zeta, 'z'))
def _load_nutation_data(datastr, seriestype):
"""
Loads nutation series from data stored in string form.
Seriestype can be 'lunisolar' or 'planetary'
"""
if seriestype == 'lunisolar':
dtypes = [('nl', int),
('nlp', int),
('nF', int),
('nD', int),
('nOm', int),
('ps', float),
('pst', float),
('pc', float),
('ec', float),
('ect', float),
('es', float)]
elif seriestype == 'planetary':
dtypes = [('nl', int),
('nF', int),
('nD', int),
('nOm', int),
('nme', int),
('nve', int),
('nea', int),
('nma', int),
('nju', int),
('nsa', int),
('nur', int),
('nne', int),
('npa', int),
('sp', int),
('cp', int),
('se', int),
('ce', int)]
else:
raise ValueError('requested invalid nutation series type')
lines = [l for l in datastr.split('\n')
if not l.startswith('#') if not l.strip() == '']
lists = [[] for _ in dtypes]
for l in lines:
for i, e in enumerate(l.split(' ')):
lists[i].append(dtypes[i][1](e))
return np.rec.fromarrays(lists, names=[e[0] for e in dtypes])
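# Illustrative usage (tiny made-up series): each whitespace-separated column
# maps onto the dtype list above::
#
#     >>> rec = _load_nutation_data("0 0 0 0 1 -1.0 0.0 0.0 1.0 0.0 0.0",
#     ...                           'lunisolar')
#     >>> rec.nOm
#     array([1])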
_nut_data_00b = """
#l lprime F D Omega longitude_sin longitude_sin*t longitude_cos obliquity_cos obliquity_cos*t obliquity_sin
0 0 0 0 1 -172064161.0 -174666.0 33386.0 92052331.0 9086.0 15377.0
0 0 2 -2 2 -13170906.0 -1675.0 -13696.0 5730336.0 -3015.0 -4587.0
0 0 2 0 2 -2276413.0 -234.0 2796.0 978459.0 -485.0 1374.0
0 0 0 0 2 2074554.0 207.0 -698.0 -897492.0 470.0 -291.0
0 1 0 0 0 1475877.0 -3633.0 11817.0 73871.0 -184.0 -1924.0
0 1 2 -2 2 -516821.0 1226.0 -524.0 224386.0 -677.0 -174.0
1 0 0 0 0 711159.0 73.0 -872.0 -6750.0 0.0 358.0
0 0 2 0 1 -387298.0 -367.0 380.0 200728.0 18.0 318.0
1 0 2 0 2 -301461.0 -36.0 816.0 129025.0 -63.0 367.0
0 -1 2 -2 2 215829.0 -494.0 111.0 -95929.0 299.0 132.0
0 0 2 -2 1 128227.0 137.0 181.0 -68982.0 -9.0 39.0
-1 0 2 0 2 123457.0 11.0 19.0 -53311.0 32.0 -4.0
-1 0 0 2 0 156994.0 10.0 -168.0 -1235.0 0.0 82.0
1 0 0 0 1 63110.0 63.0 27.0 -33228.0 0.0 -9.0
-1 0 0 0 1 -57976.0 -63.0 -189.0 31429.0 0.0 -75.0
-1 0 2 2 2 -59641.0 -11.0 149.0 25543.0 -11.0 66.0
1 0 2 0 1 -51613.0 -42.0 129.0 26366.0 0.0 78.0
-2 0 2 0 1 45893.0 50.0 31.0 -24236.0 -10.0 20.0
0 0 0 2 0 63384.0 11.0 -150.0 -1220.0 0.0 29.0
0 0 2 2 2 -38571.0 -1.0 158.0 16452.0 -11.0 68.0
0 -2 2 -2 2 32481.0 0.0 0.0 -13870.0 0.0 0.0
-2 0 0 2 0 -47722.0 0.0 -18.0 477.0 0.0 -25.0
2 0 2 0 2 -31046.0 -1.0 131.0 13238.0 -11.0 59.0
1 0 2 -2 2 28593.0 0.0 -1.0 -12338.0 10.0 -3.0
-1 0 2 0 1 20441.0 21.0 10.0 -10758.0 0.0 -3.0
2 0 0 0 0 29243.0 0.0 -74.0 -609.0 0.0 13.0
0 0 2 0 0 25887.0 0.0 -66.0 -550.0 0.0 11.0
0 1 0 0 1 -14053.0 -25.0 79.0 8551.0 -2.0 -45.0
-1 0 0 2 1 15164.0 10.0 11.0 -8001.0 0.0 -1.0
0 2 2 -2 2 -15794.0 72.0 -16.0 6850.0 -42.0 -5.0
0 0 -2 2 0 21783.0 0.0 13.0 -167.0 0.0 13.0
1 0 0 -2 1 -12873.0 -10.0 -37.0 6953.0 0.0 -14.0
0 -1 0 0 1 -12654.0 11.0 63.0 6415.0 0.0 26.0
-1 0 2 2 1 -10204.0 0.0 25.0 5222.0 0.0 15.0
0 2 0 0 0 16707.0 -85.0 -10.0 168.0 -1.0 10.0
1 0 2 2 2 -7691.0 0.0 44.0 3268.0 0.0 19.0
-2 0 2 0 0 -11024.0 0.0 -14.0 104.0 0.0 2.0
0 1 2 0 2 7566.0 -21.0 -11.0 -3250.0 0.0 -5.0
0 0 2 2 1 -6637.0 -11.0 25.0 3353.0 0.0 14.0
0 -1 2 0 2 -7141.0 21.0 8.0 3070.0 0.0 4.0
0 0 0 2 1 -6302.0 -11.0 2.0 3272.0 0.0 4.0
1 0 2 -2 1 5800.0 10.0 2.0 -3045.0 0.0 -1.0
2 0 2 -2 2 6443.0 0.0 -7.0 -2768.0 0.0 -4.0
-2 0 0 2 1 -5774.0 -11.0 -15.0 3041.0 0.0 -5.0
2 0 2 0 1 -5350.0 0.0 21.0 2695.0 0.0 12.0
0 -1 2 -2 1 -4752.0 -11.0 -3.0 2719.0 0.0 -3.0
0 0 0 -2 1 -4940.0 -11.0 -21.0 2720.0 0.0 -9.0
-1 -1 0 2 0 7350.0 0.0 -8.0 -51.0 0.0 4.0
2 0 0 -2 1 4065.0 0.0 6.0 -2206.0 0.0 1.0
1 0 0 2 0 6579.0 0.0 -24.0 -199.0 0.0 2.0
0 1 2 -2 1 3579.0 0.0 5.0 -1900.0 0.0 1.0
1 -1 0 0 0 4725.0 0.0 -6.0 -41.0 0.0 3.0
-2 0 2 0 2 -3075.0 0.0 -2.0 1313.0 0.0 -1.0
3 0 2 0 2 -2904.0 0.0 15.0 1233.0 0.0 7.0
0 -1 0 2 0 4348.0 0.0 -10.0 -81.0 0.0 2.0
1 -1 2 0 2 -2878.0 0.0 8.0 1232.0 0.0 4.0
0 0 0 1 0 -4230.0 0.0 5.0 -20.0 0.0 -2.0
-1 -1 2 2 2 -2819.0 0.0 7.0 1207.0 0.0 3.0
-1 0 2 0 0 -4056.0 0.0 5.0 40.0 0.0 -2.0
0 -1 2 2 2 -2647.0 0.0 11.0 1129.0 0.0 5.0
-2 0 0 0 1 -2294.0 0.0 -10.0 1266.0 0.0 -4.0
1 1 2 0 2 2481.0 0.0 -7.0 -1062.0 0.0 -3.0
2 0 0 0 1 2179.0 0.0 -2.0 -1129.0 0.0 -2.0
-1 1 0 1 0 3276.0 0.0 1.0 -9.0 0.0 0.0
1 1 0 0 0 -3389.0 0.0 5.0 35.0 0.0 -2.0
1 0 2 0 0 3339.0 0.0 -13.0 -107.0 0.0 1.0
-1 0 2 -2 1 -1987.0 0.0 -6.0 1073.0 0.0 -2.0
1 0 0 0 2 -1981.0 0.0 0.0 854.0 0.0 0.0
-1 0 0 1 0 4026.0 0.0 -353.0 -553.0 0.0 -139.0
0 0 2 1 2 1660.0 0.0 -5.0 -710.0 0.0 -2.0
-1 0 2 4 2 -1521.0 0.0 9.0 647.0 0.0 4.0
-1 1 0 1 1 1314.0 0.0 0.0 -700.0 0.0 0.0
0 -2 2 -2 1 -1283.0 0.0 0.0 672.0 0.0 0.0
1 0 2 2 1 -1331.0 0.0 8.0 663.0 0.0 4.0
-2 0 2 2 2 1383.0 0.0 -2.0 -594.0 0.0 -2.0
-1 0 0 0 2 1405.0 0.0 4.0 -610.0 0.0 2.0
1 1 2 -2 2 1290.0 0.0 0.0 -556.0 0.0 0.0
"""[1:-1]
_nut_data_00b = _load_nutation_data(_nut_data_00b, 'lunisolar')
# TODO: replace w/SOFA equivalent
def nutation_components2000B(jd):
"""
Computes nutation components following the IAU 2000B specification
Parameters
----------
jd : scalar
epoch at which to compute the nutation components as a JD
Returns
-------
eps : float
epsilon in radians
dpsi : float
dpsi in radians
deps : float
        depsilon in radians
"""
epsa = np.radians(obliquity(jd, 2000))
t = (jd - jd2000) / 36525
# Fundamental (Delaunay) arguments from Simon et al. (1994) via SOFA
# Mean anomaly of moon
el = ((485868.249036 + 1717915923.2178 * t) % 1296000) / _asecperrad
# Mean anomaly of sun
elp = ((1287104.79305 + 129596581.0481 * t) % 1296000) / _asecperrad
# Mean argument of the latitude of Moon
F = ((335779.526232 + 1739527262.8478 * t) % 1296000) / _asecperrad
# Mean elongation of the Moon from Sun
D = ((1072260.70369 + 1602961601.2090 * t) % 1296000) / _asecperrad
# Mean longitude of the ascending node of Moon
Om = ((450160.398036 + -6962890.5431 * t) % 1296000) / _asecperrad
# compute nutation series using array loaded from data directory
dat = _nut_data_00b
arg = dat.nl * el + dat.nlp * elp + dat.nF * F + dat.nD * D + dat.nOm * Om
sarg = np.sin(arg)
carg = np.cos(arg)
    p1u_asecperrad = _asecperrad * 1e7  # 0.1 microarcsec per rad
dpsils = np.sum((dat.ps + dat.pst * t) * sarg + dat.pc * carg) / p1u_asecperrad
depsls = np.sum((dat.ec + dat.ect * t) * carg + dat.es * sarg) / p1u_asecperrad
    # fixed offset in place of planetary terms
m_asecperrad = _asecperrad * 1e3 # milliarcsec per rad
dpsipl = -0.135 / m_asecperrad
depspl = 0.388 / m_asecperrad
return epsa, dpsils + dpsipl, depsls + depspl # all in radians
def nutation_matrix(epoch):
"""
Nutation matrix generated from nutation components.
Matrix converts from mean coordinate to true coordinate as
r_true = M * r_mean
"""
# TODO: implement higher precision 2006/2000A model if requested/needed
epsa, dpsi, deps = nutation_components2000B(epoch.jd) # all in radians
return matrix_product(rotation_matrix(-(epsa + deps), 'x', False),
rotation_matrix(-dpsi, 'z', False),
rotation_matrix(epsa, 'x', False))
|
35024ac8379c4fb6b98fbbe25ce4807cad9de2401f0b4fa9333af654f69d84ee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import contextlib
import pathlib
import re
import sys
import inspect
import os
from collections import OrderedDict
from operator import itemgetter
import numpy as np
__all__ = ['register_reader', 'register_writer', 'register_identifier',
'identify_format', 'get_reader', 'get_writer', 'read', 'write',
'get_formats', 'IORegistryError', 'delay_doc_updates',
'UnifiedReadWriteMethod', 'UnifiedReadWrite']
__doctest_skip__ = ['register_identifier']
_readers = OrderedDict()
_writers = OrderedDict()
_identifiers = OrderedDict()
PATH_TYPES = (str, pathlib.Path)
class IORegistryError(Exception):
"""Custom error for registry clashes.
"""
pass
# If multiple formats are added to one class the update of the docs is quite
# expensive. Classes for which the doc update is temporarly delayed are added
# to this set.
_delayed_docs_classes = set()
@contextlib.contextmanager
def delay_doc_updates(cls):
"""Contextmanager to disable documentation updates when registering
reader and writer. The documentation is only built once when the
contextmanager exits.
.. versionadded:: 1.3
Parameters
----------
cls : class
Class for which the documentation updates should be delayed.
Notes
-----
Registering multiple readers and writers can cause significant overhead
because the documentation of the corresponding ``read`` and ``write``
methods are build every time.
.. warning::
This contextmanager is experimental and may be replaced by a more
general approach.
Examples
--------
see for example the source code of ``astropy.table.__init__``.
"""
_delayed_docs_classes.add(cls)
yield
_delayed_docs_classes.discard(cls)
_update__doc__(cls, 'read')
_update__doc__(cls, 'write')
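# Illustrative usage (``read_fmt1``/``read_fmt2`` are hypothetical readers)::
#
#     >>> from astropy.table import Table
#     >>> with delay_doc_updates(Table):
#     ...     register_reader('fmt1', Table, read_fmt1)
#     ...     register_reader('fmt2', Table, read_fmt2)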
def get_formats(data_class=None, readwrite=None):
"""
Get the list of registered I/O formats as a Table.
Parameters
----------
data_class : classobj, optional
Filter readers/writer to match data class (default = all classes).
readwrite : str or None, optional
Search only for readers (``"Read"``) or writers (``"Write"``). If None
search for both. Default is None.
.. versionadded:: 1.3
Returns
-------
format_table : Table
Table of available I/O formats.
"""
from astropy.table import Table
format_classes = sorted(set(_readers) | set(_writers), key=itemgetter(0))
rows = []
for format_class in format_classes:
if (data_class is not None and not _is_best_match(
data_class, format_class[1], format_classes)):
continue
has_read = 'Yes' if format_class in _readers else 'No'
has_write = 'Yes' if format_class in _writers else 'No'
has_identify = 'Yes' if format_class in _identifiers else 'No'
# Check if this is a short name (e.g. 'rdb') which is deprecated in
# favor of the full 'ascii.rdb'.
ascii_format_class = ('ascii.' + format_class[0], format_class[1])
deprecated = 'Yes' if ascii_format_class in format_classes else ''
rows.append((format_class[1].__name__, format_class[0], has_read,
has_write, has_identify, deprecated))
if readwrite is not None:
if readwrite == 'Read':
rows = [row for row in rows if row[2] == 'Yes']
elif readwrite == 'Write':
rows = [row for row in rows if row[3] == 'Yes']
else:
raise ValueError('unrecognized value for "readwrite": {0}.\n'
'Allowed are "Read" and "Write" and None.')
# Sorting the list of tuples is much faster than sorting it after the table
# is created. (#5262)
if rows:
# Indices represent "Data Class", "Deprecated" and "Format".
data = list(zip(*sorted(rows, key=itemgetter(0, 5, 1))))
else:
data = None
format_table = Table(data, names=('Data class', 'Format', 'Read', 'Write',
'Auto-identify', 'Deprecated'))
if not np.any(format_table['Deprecated'] == 'Yes'):
format_table.remove_column('Deprecated')
return format_table
def _update__doc__(data_class, readwrite):
"""
Update the docstring to include all the available readers / writers for the
``data_class.read`` or ``data_class.write`` functions (respectively).
"""
FORMATS_TEXT = 'The available built-in formats are:'
# Get the existing read or write method and its docstring
class_readwrite_func = getattr(data_class, readwrite)
if not isinstance(class_readwrite_func.__doc__, str):
# No docstring--could just be test code, or possibly code compiled
# without docstrings
return
lines = class_readwrite_func.__doc__.splitlines()
# Find the location of the existing formats table if it exists
sep_indices = [ii for ii, line in enumerate(lines) if FORMATS_TEXT in line]
if sep_indices:
# Chop off the existing formats table, including the initial blank line
chop_index = sep_indices[0]
lines = lines[:chop_index]
# Find the minimum indent, skipping the first line because it might be odd
matches = [re.search(r'(\S)', line) for line in lines[1:]]
left_indent = ' ' * min(match.start() for match in matches if match)
# Get the available unified I/O formats for this class
# Include only formats that have a reader, and drop the 'Data class' column
format_table = get_formats(data_class, readwrite.capitalize())
format_table.remove_column('Data class')
# Get the available formats as a table, then munge the output of pformat()
# a bit and put it into the docstring.
new_lines = format_table.pformat(max_lines=-1, max_width=80)
table_rst_sep = re.sub('-', '=', new_lines[1])
new_lines[1] = table_rst_sep
new_lines.insert(0, table_rst_sep)
new_lines.append(table_rst_sep)
# Check for deprecated names and include a warning at the end.
if 'Deprecated' in format_table.colnames:
new_lines.extend(['',
'Deprecated format names like ``aastex`` will be '
'removed in a future version. Use the full ',
'name (e.g. ``ascii.aastex``) instead.'])
new_lines = [FORMATS_TEXT, ''] + new_lines
lines.extend([left_indent + line for line in new_lines])
# Depending on Python version and whether class_readwrite_func is
# an instancemethod or classmethod, one of the following will work.
if isinstance(class_readwrite_func, UnifiedReadWrite):
class_readwrite_func.__class__.__doc__ = '\n'.join(lines)
else:
try:
class_readwrite_func.__doc__ = '\n'.join(lines)
except AttributeError:
class_readwrite_func.__func__.__doc__ = '\n'.join(lines)
def register_reader(data_format, data_class, function, force=False):
"""
Register a reader function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when reading.
data_class : classobj
The class of the object that the reader produces.
function : function
The function to read in a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
"""
if not (data_format, data_class) in _readers or force:
_readers[(data_format, data_class)] = function
else:
raise IORegistryError("Reader for format '{0}' and class '{1}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in _delayed_docs_classes:
_update__doc__(data_class, 'read')
def unregister_reader(data_format, data_class):
"""
Unregister a reader function
Parameters
----------
data_format : str
The data format identifier.
data_class : classobj
The class of the object that the reader produces.
"""
if (data_format, data_class) in _readers:
_readers.pop((data_format, data_class))
else:
raise IORegistryError("No reader defined for format '{0}' and class '{1}'"
''.format(data_format, data_class.__name__))
if data_class not in _delayed_docs_classes:
_update__doc__(data_class, 'read')
def register_writer(data_format, data_class, function, force=False):
"""
Register a table writer function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when writing.
data_class : classobj
The class of the object that can be written.
function : function
The function to write out a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
"""
if not (data_format, data_class) in _writers or force:
_writers[(data_format, data_class)] = function
else:
raise IORegistryError("Writer for format '{0}' and class '{1}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in _delayed_docs_classes:
_update__doc__(data_class, 'write')
def unregister_writer(data_format, data_class):
"""
Unregister a writer function
Parameters
----------
data_format : str
The data format identifier.
data_class : classobj
The class of the object that can be written.
"""
if (data_format, data_class) in _writers:
_writers.pop((data_format, data_class))
else:
raise IORegistryError("No writer defined for format '{0}' and class '{1}'"
''.format(data_format, data_class.__name__))
if data_class not in _delayed_docs_classes:
_update__doc__(data_class, 'write')
def register_identifier(data_format, data_class, identifier, force=False):
"""
Associate an identifier function with a specific data type.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : classobj
The class of the object that can be written.
identifier : function
A function that checks the argument specified to `read` or `write` to
determine whether the input can be interpreted as a table of type
``data_format``. This function should take the following arguments:
- ``origin``: A string ``"read"`` or ``"write"`` identifying whether
the file is to be opened for reading or writing.
- ``path``: The path to the file.
- ``fileobj``: An open file object to read the file's contents, or
`None` if the file could not be opened.
- ``*args``: Positional arguments for the `read` or `write`
function.
- ``**kwargs``: Keyword arguments for the `read` or `write`
function.
One or both of ``path`` or ``fileobj`` may be `None`. If they are
both `None`, the identifier will need to work from ``args[0]``.
The function should return True if the input can be identified
as being of format ``data_format``, and False otherwise.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
Examples
--------
To set the identifier based on extensions, for formats that take a
filename as a first argument, you can do for example::
>>> def my_identifier(*args, **kwargs):
... return isinstance(args[0], str) and args[0].endswith('.tbl')
>>> register_identifier('ipac', Table, my_identifier)
"""
if not (data_format, data_class) in _identifiers or force:
_identifiers[(data_format, data_class)] = identifier
else:
raise IORegistryError("Identifier for format '{0}' and class '{1}' is "
'already defined'.format(data_format,
data_class.__name__))
def unregister_identifier(data_format, data_class):
"""
Unregister an identifier function
Parameters
----------
data_format : str
The data format identifier.
data_class : classobj
The class of the object that can be read/written.
"""
if (data_format, data_class) in _identifiers:
_identifiers.pop((data_format, data_class))
else:
raise IORegistryError("No identifier defined for format '{0}' and class"
" '{1}'".format(data_format, data_class.__name__))
def identify_format(origin, data_class_required, path, fileobj, args, kwargs):
"""Loop through identifiers to see which formats match.
Parameters
----------
origin : str
A string ``"read`` or ``"write"`` identifying whether the file is to be
opened for reading or writing.
data_class_required : object
The specified class for the result of `read` or the class that is to be
written.
path : str, other path object or None
The path to the file or None.
fileobj : File object or None.
An open file object to read the file's contents, or ``None`` if the
file could not be opened.
args : sequence
Positional arguments for the `read` or `write` function. Note that
        these must be provided as a sequence.
kwargs : dict-like
Keyword arguments for the `read` or `write` function. Note that this
parameter must be `dict`-like.
Returns
-------
valid_formats : list
List of matching formats.
"""
valid_formats = []
for data_format, data_class in _identifiers:
if _is_best_match(data_class_required, data_class, _identifiers):
if _identifiers[(data_format, data_class)](
origin, path, fileobj, *args, **kwargs):
valid_formats.append(data_format)
return valid_formats
def _get_format_table_str(data_class, readwrite):
format_table = get_formats(data_class, readwrite=readwrite)
format_table.remove_column('Data class')
format_table_str = '\n'.join(format_table.pformat(max_lines=-1))
return format_table_str
def get_reader(data_format, data_class):
"""Get reader for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : classobj
The class of the object that can be written.
Returns
-------
reader : callable
The registered reader function for this format and class.
"""
readers = [(fmt, cls) for fmt, cls in _readers if fmt == data_format]
for reader_format, reader_class in readers:
if _is_best_match(data_class, reader_class, readers):
return _readers[(reader_format, reader_class)]
else:
format_table_str = _get_format_table_str(data_class, 'Read')
raise IORegistryError(
"No reader defined for format '{0}' and class '{1}'.\n\nThe "
"available formats are:\n\n{2}".format(
data_format, data_class.__name__, format_table_str))
def get_writer(data_format, data_class):
"""Get writer for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : classobj
The class of the object that can be written.
Returns
-------
writer : callable
The registered writer function for this format and class.
"""
writers = [(fmt, cls) for fmt, cls in _writers if fmt == data_format]
for writer_format, writer_class in writers:
if _is_best_match(data_class, writer_class, writers):
return _writers[(writer_format, writer_class)]
else:
format_table_str = _get_format_table_str(data_class, 'Write')
raise IORegistryError(
"No writer defined for format '{0}' and class '{1}'.\n\nThe "
"available formats are:\n\n{2}".format(
data_format, data_class.__name__, format_table_str))
def read(cls, *args, format=None, **kwargs):
"""
Read in data.
The arguments passed to this method depend on the format.
"""
ctx = None
try:
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
from astropy.utils.data import get_readable_fileobj
# path might be a pathlib.Path object
if isinstance(args[0], pathlib.Path):
args = (str(args[0]),) + args[1:]
path = args[0]
try:
ctx = get_readable_fileobj(args[0], encoding='binary')
fileobj = ctx.__enter__()
except OSError:
raise
except Exception:
fileobj = None
else:
args = [fileobj] + list(args[1:])
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = _get_valid_format(
'read', cls, path, fileobj, args, kwargs)
reader = get_reader(format, cls)
data = reader(*args, **kwargs)
if not isinstance(data, cls):
# User has read with a subclass where only the parent class is
# registered. This returns the parent class, so try coercing
# to desired subclass.
try:
data = cls(data)
except Exception:
raise TypeError('could not convert reader output to {0} '
'class.'.format(cls.__name__))
finally:
if ctx is not None:
ctx.__exit__(*sys.exc_info())
return data
def write(data, *args, format=None, **kwargs):
"""
Write out data.
The arguments passed to this method depend on the format.
"""
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
# path might be a pathlib.Path object
if isinstance(args[0], pathlib.Path):
args = (str(args[0]),) + args[1:]
path = args[0]
fileobj = None
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = _get_valid_format(
'write', data.__class__, path, fileobj, args, kwargs)
writer = get_writer(format, data.__class__)
writer(data, *args, **kwargs)
def _is_best_match(class1, class2, format_classes):
"""
Determine if class2 is the "best" match for class1 in the list
of classes. It is assumed that (class2 in classes) is True.
    class2 is the best match if:
- ``class1`` is a subclass of ``class2`` AND
- ``class2`` is the nearest ancestor of ``class1`` that is in classes
(which includes the case that ``class1 is class2``)
"""
if issubclass(class1, class2):
classes = {cls for fmt, cls in format_classes}
for parent in class1.__mro__:
if parent is class2: # class2 is closest registered ancestor
return True
            if parent in classes:  # class2 was superseded
return False
return False
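# Example (hypothetical classes): with readers registered for both ``Table``
# and a subclass ``QTable``, ``_is_best_match(QTable, Table, readers)`` walks
# up ``QTable.__mro__`` and hits the registered ``QTable`` before ``Table``,
# so ``Table`` is not the best match; if only the ``Table`` reader were
# registered, it would be.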
def _get_valid_format(mode, cls, path, fileobj, args, kwargs):
"""
Returns the first valid format that can be used to read/write the data in
question. Mode can be either 'read' or 'write'.
"""
valid_formats = identify_format(mode, cls, path, fileobj, args, kwargs)
if len(valid_formats) == 0:
format_table_str = _get_format_table_str(cls, mode.capitalize())
raise IORegistryError("Format could not be identified.\n"
"The available formats are:\n"
"{0}".format(format_table_str))
elif len(valid_formats) > 1:
raise IORegistryError(
"Format is ambiguous - options are: {0}".format(
', '.join(sorted(valid_formats, key=itemgetter(0)))))
return valid_formats[0]
class UnifiedReadWrite:
"""Base class for the worker object used in unified read() or write() methods.
This lightweight object is created for each `read()` or `write()` call
via ``read`` / ``write`` descriptors on the data object class. The key
driver is to allow complete format-specific documentation of available
method options via a ``help()`` method, e.g. ``Table.read.help('fits')``.
Subclasses must define a ``__call__`` method which is what actually gets
called when the data object ``read()`` or ``write()`` method is called.
For the canonical example see the `~astropy.table.Table` class
implementation (in particular the ``connect.py`` module there).
Parameters
----------
instance : object
Descriptor calling instance or None if no instance
cls : type
Descriptor calling class (either owner class or instance class)
method_name : str
Method name, either 'read' or 'write'
"""
def __init__(self, instance, cls, method_name):
self._instance = instance
self._cls = cls
self._method_name = method_name # 'read' or 'write'
def help(self, format=None, out=None):
"""Output help documentation for the specified unified I/O ``format``.
By default the help output is printed to the console via ``pydoc.pager``.
        Instead one can supply a file handle object as ``out`` and the output
will be written to that handle.
Parameters
----------
format : str
Unified I/O format name, e.g. 'fits' or 'ascii.ecsv'
out : None or file handle object
Output destination (default is stdout via a pager)
"""
cls = self._cls
method_name = self._method_name
# Get reader or writer function
get_func = get_reader if method_name == 'read' else get_writer
try:
if format:
read_write_func = get_func(format, cls)
except IORegistryError as err:
reader_doc = 'ERROR: ' + str(err)
else:
if format:
# Format-specific
header = ("{}.{}(format='{}') documentation\n"
.format(cls.__name__, method_name, format))
doc = read_write_func.__doc__
else:
# General docs
header = ('{}.{} general documentation\n'
.format(cls.__name__, method_name))
doc = getattr(cls, method_name).__doc__
reader_doc = re.sub('.', '=', header)
reader_doc += header
reader_doc += re.sub('.', '=', header)
reader_doc += os.linesep
reader_doc += inspect.cleandoc(doc)
if out is None:
import pydoc
pydoc.pager(reader_doc)
else:
out.write(reader_doc)
def list_formats(self, out=None):
"""Print a list of available formats to console (or ``out`` filehandle)
out : None or file handle object
Output destination (default is stdout via a pager)
"""
tbl = get_formats(self._cls, self._method_name.capitalize())
del tbl['Data class']
if out is None:
tbl.pprint(max_lines=-1, max_width=-1)
else:
out.write('\n'.join(tbl.pformat(max_lines=-1, max_width=-1)))
return out
class UnifiedReadWriteMethod:
"""Descriptor class for creating read() and write() methods in unified I/O.
The canonical example is in the ``Table`` class, where the ``connect.py``
module creates subclasses of the ``UnifiedReadWrite`` class. These have
custom ``__call__`` methods that do the setup work related to calling the
registry read() or write() functions. With this, the ``Table`` class
defines read and write methods as follows::
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
Parameters
----------
func : `~astropy.io.registry.UnifiedReadWrite` subclass
Class that defines read or write functionality
"""
def __init__(self, func):
self.func = func
def __get__(self, instance, owner_cls):
return self.func(instance, owner_cls)
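# Minimal sketch (hypothetical class, assuming the module-level read() above):
# wiring the descriptor into a data class::
#
#     >>> class MyDataRead(UnifiedReadWrite):  # hypothetical worker
#     ...     def __init__(self, instance, cls):
#     ...         super().__init__(instance, cls, 'read')
#     ...     def __call__(self, *args, **kwargs):
#     ...         return read(self._cls, *args, **kwargs)
#     >>> class MyData:  # hypothetical data class
#     ...     read = UnifiedReadWriteMethod(MyDataRead)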
|
888ac6a464458ded64229e052d9efc8a5c9e30c1f76c6962af89e632fc5ae27c | """
Implements the wrapper for the Astropy test runner in the form of the
``./setup.py test`` distutils command.
"""
import os
import stat
import shutil
import subprocess
import sys
import tempfile
from distutils import log
from contextlib import contextmanager
from setuptools import Command
@contextmanager
def _suppress_stdout():
'''
A context manager to temporarily disable stdout.
Used later when installing a temporary copy of astropy to avoid a
very verbose output.
'''
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
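# Illustrative usage: output printed inside the block is discarded::
#
#     >>> with _suppress_stdout():
#     ...     print("this goes to os.devnull")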
class FixRemoteDataOption(type):
"""
This metaclass is used to catch cases where the user is running the tests
with --remote-data. We've now changed the --remote-data option so that it
takes arguments, but we still want --remote-data to work as before and to
enable all remote tests. With this metaclass, we can modify sys.argv
before distutils/setuptools try to parse the command-line options.
"""
def __init__(cls, name, bases, dct):
try:
idx = sys.argv.index('--remote-data')
except ValueError:
pass
else:
sys.argv[idx] = '--remote-data=any'
try:
idx = sys.argv.index('-R')
except ValueError:
pass
else:
sys.argv[idx] = '-R=any'
return super().__init__(name, bases, dct)
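# Example (illustrative): a command line of ``setup.py test --remote-data`` is
# rewritten before option parsing to ``setup.py test --remote-data=any``, so
# the bare flag keeps its pre-existing meaning of enabling all remote tests.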
class AstropyTest(Command, metaclass=FixRemoteDataOption):
description = 'Run the tests for this package'
user_options = [
('package=', 'P',
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"Accepts comma separated string to specify multiple packages. "
"If nothing is specified, all default tests are run."),
('test-path=', 't',
'Specify a test location by path. If a relative path to a .py file, '
'it is relative to the built package, so e.g., a leading "astropy/" '
'is necessary. If a relative path to a .rst file, it is relative to '
'the directory *below* the --docs-path directory, so a leading '
'"docs/" is usually necessary. May also be an absolute path.'),
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
('plugins=', 'p',
'Plugins to enable when running pytest.'),
('pastebin=', 'b',
"Enable pytest pastebin output. Either 'all' or 'failed'."),
('args=', 'a',
'Additional arguments to be passed to pytest.'),
('remote-data=', 'R', 'Run tests that download remote data. Should be '
'one of none/astropy/any (defaults to none).'),
('pep8', '8',
'Enable PEP8 checking and disable regular tests. '
'Requires the pytest-pep8 plugin.'),
('pdb', 'd',
'Start the interactive Python debugger on errors.'),
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('open-files', 'o', 'Fail if any tests leave files open. Requires the '
'psutil package.'),
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If "auto", all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
('docs-path=', None,
'The path to the documentation .rst files. If not provided, and '
'the current directory contains a directory called "docs", that '
'will be used.'),
('skip-docs', None,
"Don't test the documentation .rst files."),
('repeat=', None,
'How many times to repeat each test (can be used to check for '
'sporadic failures).'),
('temp-root=', None,
'The root directory in which to create the temporary testing files. '
'If unspecified the system default is used (e.g. /tmp) as explained '
'in the documentation for tempfile.mkstemp.'),
('verbose-install', None,
'Turn on terminal output from the installation of astropy in a '
'temporary folder.'),
('readonly', None,
'Make the temporary installation being tested read-only.')
]
package_name = ''
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = 'none'
self.pep8 = False
self.pdb = False
self.coverage = False
self.open_files = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
self.repeat = None
self.temp_root = None
self.verbose_install = False
self.readonly = False
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.test('
'package={1.package!r}, '
'test_path={1.test_path!r}, '
'args={1.args!r}, '
'plugins={1.plugins!r}, '
'verbose={1.verbose_results!r}, '
'pastebin={1.pastebin!r}, '
'remote_data={1.remote_data!r}, '
'pep8={1.pep8!r}, '
'pdb={1.pdb!r}, '
'open_files={1.open_files!r}, '
'parallel={1.parallel!r}, '
'docs_path={1.docs_path!r}, '
'skip_docs={1.skip_docs!r}, '
'add_local_eggs_to_path=True, ' # see _build_temp_install below
'repeat={1.repeat!r})); '
'{cmd_post}'
'sys.exit(result)')
return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
def run(self):
"""
Run the tests!
"""
# Install the runtime dependencies.
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
# Ensure there is a doc path
if self.docs_path is None:
cfg_docs_dir = self.distribution.get_option_dict('build_docs').get('source_dir', None)
# Some affiliated packages use this.
# See astropy/package-template#157
if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]):
self.docs_path = os.path.abspath(cfg_docs_dir[1])
# fall back on a default path of "docs"
elif os.path.exists('docs'): # pragma: no cover
self.docs_path = os.path.abspath('docs')
# Build a testing install of the package
self._build_temp_install()
# Install the test dependencies
# NOTE: we do this here after _build_temp_install because there is
        # a weird bug which occurs if psutil is installed in this way before
        # astropy is built: Cython can have a segmentation fault. Strange, eh?
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# Copy any additional dependencies that may have been installed via
# tests_requires or install_requires. We then pass the
# add_local_eggs_to_path=True option to package.test() to make sure the
# eggs get included in the path.
if os.path.exists('.eggs'):
shutil.copytree('.eggs', os.path.join(self.testing_path, '.eggs'))
# This option exists so that we can make sure that the tests don't
# write to an installed location.
if self.readonly:
log.info('changing permissions of temporary installation to read-only')
self._change_permissions_testing_path(writable=False)
# Run everything in a try: finally: so that the tmp dir gets deleted.
try:
# Construct this modules testing command
cmd = self.generate_testing_command()
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
testproc = subprocess.Popen(
[sys.executable, '-c', cmd],
cwd=self.testing_path, close_fds=False)
retcode = testproc.wait()
except KeyboardInterrupt:
import signal
# If a keyboard interrupt is handled, pass it to the test
# subprocess to prompt pytest to initiate its teardown
testproc.send_signal(signal.SIGINT)
retcode = testproc.wait()
finally:
# Remove temporary directory
if self.readonly:
self._change_permissions_testing_path(writable=True)
shutil.rmtree(self.tmp_dir)
raise SystemExit(retcode)
def _build_temp_install(self):
"""
        Install the package into a temporary directory for the purposes of
        testing. This allows us to test the install command, including the
        entry points, and also avoids creating pyc and __pycache__ directories
        inside the build directory.
"""
# On OSX the default path for temp files is under /var, but in most
# cases on OSX /var is actually a symlink to /private/var; ensure we
# dereference that link, because py.test is very sensitive to relative
# paths...
tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-',
dir=self.temp_root)
self.tmp_dir = os.path.realpath(tmp_dir)
log.info('installing to temporary directory: {0}'.format(self.tmp_dir))
# We now install the package to the temporary directory. We do this
# rather than build and copy because this will ensure that e.g. entry
# points work.
self.reinitialize_command('install')
install_cmd = self.distribution.get_command_obj('install')
install_cmd.prefix = self.tmp_dir
if self.verbose_install:
self.run_command('install')
else:
with _suppress_stdout():
self.run_command('install')
# We now get the path to the site-packages directory that was created
# inside self.tmp_dir
install_cmd = self.get_finalized_command('install')
self.testing_path = install_cmd.install_lib
# Ideally, docs_path is set properly in run(), but if it is still
# not set here, do not pretend it is, otherwise bad things happen.
# See astropy/package-template#157
if self.docs_path is not None:
new_docs_path = os.path.join(self.testing_path,
os.path.basename(self.docs_path))
shutil.copytree(self.docs_path, new_docs_path)
self.docs_path = new_docs_path
shutil.copy('setup.cfg', self.testing_path)
def _change_permissions_testing_path(self, writable=False):
if writable:
basic_flags = stat.S_IRUSR | stat.S_IWUSR
else:
basic_flags = stat.S_IRUSR
for root, dirs, files in os.walk(self.testing_path):
for dirname in dirs:
os.chmod(os.path.join(root, dirname), basic_flags | stat.S_IXUSR)
for filename in files:
os.chmod(os.path.join(root, filename), basic_flags)
def _generate_coverage_commands(self):
"""
This method creates the post and pre commands if coverage is to be
generated
"""
if self.parallel != 0:
raise ValueError(
"--coverage can not be used with --parallel")
try:
import coverage # pylint: disable=W0611
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is "
"installed.")
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
self.testing_path, self.package_name.replace('.', '/'),
'tests', 'coveragerc')
with open(coveragerc, 'r') as fd:
coveragerc_content = fd.read()
coveragerc_content = coveragerc_content.replace(
"{packagename}", self.package_name.replace('.', '/'))
tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc')
with open(tmp_coveragerc, 'wb') as tmp:
tmp.write(coveragerc_content.encode('utf-8'))
cmd_pre = (
'import coverage; '
'cov = coverage.coverage(data_file=r"{0}", config_file=r"{1}"); '
'cov.start();'.format(
os.path.abspath(".coverage"), os.path.abspath(tmp_coveragerc)))
cmd_post = (
'cov.stop(); '
'from astropy.tests.helper import _save_coverage; '
'_save_coverage(cov, result, r"{0}", r"{1}");'.format(
os.path.abspath('.'), os.path.abspath(self.testing_path)))
return cmd_pre, cmd_post
|
44b37769adb1e0dd10a613f344eb573a8714544ae15d9935a2fd12519c87bbb3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import sys
import warnings
import pytest
from .helper import catch_warnings
from astropy import log
from astropy.logger import LoggingError, conf
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning
# Save original values of hooks. These are not the system values, but the
# already overwritten values since the logger already gets imported before
# this file gets executed.
_excepthook = sys.__excepthook__
_showwarning = warnings.showwarning
try:
ip = get_ipython()
except NameError:
ip = None
def setup_function(function):
# Reset modules to default
importlib.reload(warnings)
importlib.reload(sys)
# Reset internal original hooks
log._showwarning_orig = None
log._excepthook_orig = None
# Set up the logger
log._set_defaults()
# Reset hooks
if log.warnings_logging_enabled():
log.disable_warnings_logging()
if log.exception_logging_enabled():
log.disable_exception_logging()
teardown_module = setup_function
def test_warnings_logging_disable_no_enable():
with pytest.raises(LoggingError) as e:
log.disable_warnings_logging()
assert e.value.args[0] == 'Warnings logging has not been enabled'
def test_warnings_logging_enable_twice():
log.enable_warnings_logging()
with pytest.raises(LoggingError) as e:
log.enable_warnings_logging()
assert e.value.args[0] == 'Warnings logging has already been enabled'
def test_warnings_logging_overridden():
log.enable_warnings_logging()
warnings.showwarning = lambda: None
with pytest.raises(LoggingError) as e:
log.disable_warnings_logging()
assert e.value.args[0] == 'Cannot disable warnings logging: warnings.showwarning was not set by this logger, or has been overridden'
def test_warnings_logging():
# Without warnings logging
with catch_warnings() as warn_list:
with log.log_to_list() as log_list:
warnings.warn("This is a warning", AstropyUserWarning)
assert len(log_list) == 0
assert len(warn_list) == 1
assert warn_list[0].message.args[0] == "This is a warning"
# With warnings logging
with catch_warnings() as warn_list:
log.enable_warnings_logging()
with log.log_to_list() as log_list:
warnings.warn("This is a warning", AstropyUserWarning)
log.disable_warnings_logging()
assert len(log_list) == 1
assert len(warn_list) == 0
assert log_list[0].levelname == 'WARNING'
assert log_list[0].message.startswith('This is a warning')
assert log_list[0].origin == 'astropy.tests.test_logger'
# With warnings logging (differentiate between Astropy and non-Astropy)
with catch_warnings() as warn_list:
log.enable_warnings_logging()
with log.log_to_list() as log_list:
warnings.warn("This is a warning", AstropyUserWarning)
warnings.warn("This is another warning, not from Astropy")
log.disable_warnings_logging()
assert len(log_list) == 1
assert len(warn_list) == 1
assert log_list[0].levelname == 'WARNING'
assert log_list[0].message.startswith('This is a warning')
assert log_list[0].origin == 'astropy.tests.test_logger'
assert warn_list[0].message.args[0] == "This is another warning, not from Astropy"
# Without warnings logging
with catch_warnings() as warn_list:
with log.log_to_list() as log_list:
warnings.warn("This is a warning", AstropyUserWarning)
assert len(log_list) == 0
assert len(warn_list) == 1
assert warn_list[0].message.args[0] == "This is a warning"
def test_warnings_logging_with_custom_class():
class CustomAstropyWarningClass(AstropyWarning):
pass
# With warnings logging
with catch_warnings() as warn_list:
log.enable_warnings_logging()
with log.log_to_list() as log_list:
warnings.warn("This is a warning", CustomAstropyWarningClass)
log.disable_warnings_logging()
assert len(log_list) == 1
assert len(warn_list) == 0
assert log_list[0].levelname == 'WARNING'
assert log_list[0].message.startswith('CustomAstropyWarningClass: This is a warning')
assert log_list[0].origin == 'astropy.tests.test_logger'
def test_warning_logging_with_io_votable_warning():
from astropy.io.votable.exceptions import W02, vo_warn
with catch_warnings() as warn_list:
log.enable_warnings_logging()
with log.log_to_list() as log_list:
vo_warn(W02, ('a', 'b'))
log.disable_warnings_logging()
assert len(log_list) == 1
assert len(warn_list) == 0
assert log_list[0].levelname == 'WARNING'
x = log_list[0].message.startswith(("W02: ?:?:?: W02: a attribute 'b' is "
"invalid. Must be a standard XML id"))
assert x
assert log_list[0].origin == 'astropy.tests.test_logger'
def test_import_error_in_warning_logging():
"""
Regression test for https://github.com/astropy/astropy/issues/2671
This test actually puts a goofy fake module into ``sys.modules`` to test
this problem.
"""
class FakeModule:
def __getattr__(self, attr):
raise ImportError('_showwarning should ignore any exceptions '
'here')
log.enable_warnings_logging()
sys.modules['<test fake module>'] = FakeModule()
try:
warnings.showwarning(AstropyWarning('Regression test for #2671'),
AstropyWarning, '<this is only a test>', 1)
finally:
del sys.modules['<test fake module>']
def test_exception_logging_disable_no_enable():
with pytest.raises(LoggingError) as e:
log.disable_exception_logging()
assert e.value.args[0] == 'Exception logging has not been enabled'
def test_exception_logging_enable_twice():
log.enable_exception_logging()
with pytest.raises(LoggingError) as e:
log.enable_exception_logging()
assert e.value.args[0] == 'Exception logging has already been enabled'
# You can't really override the exception handler in IPython this way, so
# this test doesn't really make sense in the IPython context.
@pytest.mark.skipif(str("ip is not None"))
def test_exception_logging_overridden():
log.enable_exception_logging()
sys.excepthook = lambda etype, evalue, tb: None
with pytest.raises(LoggingError) as e:
log.disable_exception_logging()
assert e.value.args[0] == 'Cannot disable exception logging: sys.excepthook was not set by this logger, or has been overridden'
@pytest.mark.xfail(str("ip is not None"))
def test_exception_logging():
# Without exception logging
try:
with log.log_to_list() as log_list:
raise Exception("This is an Exception")
except Exception as exc:
sys.excepthook(*sys.exc_info())
assert exc.args[0] == "This is an Exception"
else:
assert False # exception should have been raised
assert len(log_list) == 0
# With exception logging
try:
log.enable_exception_logging()
with log.log_to_list() as log_list:
raise Exception("This is an Exception")
except Exception as exc:
sys.excepthook(*sys.exc_info())
assert exc.args[0] == "This is an Exception"
else:
assert False # exception should have been raised
assert len(log_list) == 1
assert log_list[0].levelname == 'ERROR'
assert log_list[0].message.startswith('Exception: This is an Exception')
assert log_list[0].origin == 'astropy.tests.test_logger'
# Without exception logging
log.disable_exception_logging()
try:
with log.log_to_list() as log_list:
raise Exception("This is an Exception")
except Exception as exc:
sys.excepthook(*sys.exc_info())
assert exc.args[0] == "This is an Exception"
else:
assert False # exception should have been raised
assert len(log_list) == 0
@pytest.mark.xfail(str("ip is not None"))
def test_exception_logging_origin():
# The point here is to get an exception raised from another location
# and make sure the error's origin is reported correctly
from astropy.utils.collections import HomogeneousList
l = HomogeneousList(int)
try:
log.enable_exception_logging()
with log.log_to_list() as log_list:
l.append('foo')
except TypeError as exc:
sys.excepthook(*sys.exc_info())
assert exc.args[0].startswith(
"homogeneous list must contain only objects of type ")
else:
assert False
assert len(log_list) == 1
assert log_list[0].levelname == 'ERROR'
assert log_list[0].message.startswith(
"TypeError: homogeneous list must contain only objects of type ")
assert log_list[0].origin == 'astropy.utils.collections'
@pytest.mark.skip(reason="Infinite recursion on Python 3.5+, probably a real issue")
#@pytest.mark.xfail(str("ip is not None"))
def test_exception_logging_argless_exception():
"""
Regression test for a crash that occurred on Python 3 when logging an
exception that was instantiated with no arguments (no message, etc.)
Regression test for https://github.com/astropy/astropy/pull/4056
"""
try:
log.enable_exception_logging()
with log.log_to_list() as log_list:
raise Exception()
except Exception as exc:
sys.excepthook(*sys.exc_info())
else:
assert False # exception should have been raised
assert len(log_list) == 1
assert log_list[0].levelname == 'ERROR'
assert log_list[0].message == 'Exception [astropy.tests.test_logger]'
assert log_list[0].origin == 'astropy.tests.test_logger'
@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR'])
def test_log_to_list(level):
orig_level = log.level
try:
if level is not None:
log.setLevel(level)
with log.log_to_list() as log_list:
log.error("Error message")
log.warning("Warning message")
log.info("Information message")
log.debug("Debug message")
finally:
log.setLevel(orig_level)
if level is None:
# The log level *should* be set to whatever it was in the config
level = conf.log_level
# Check list length
if level == 'DEBUG':
assert len(log_list) == 4
elif level == 'INFO':
assert len(log_list) == 3
elif level == 'WARN':
assert len(log_list) == 2
elif level == 'ERROR':
assert len(log_list) == 1
# Check list content
assert log_list[0].levelname == 'ERROR'
assert log_list[0].message.startswith('Error message')
assert log_list[0].origin == 'astropy.tests.test_logger'
if len(log_list) >= 2:
assert log_list[1].levelname == 'WARNING'
assert log_list[1].message.startswith('Warning message')
assert log_list[1].origin == 'astropy.tests.test_logger'
if len(log_list) >= 3:
assert log_list[2].levelname == 'INFO'
assert log_list[2].message.startswith('Information message')
assert log_list[2].origin == 'astropy.tests.test_logger'
if len(log_list) >= 4:
assert log_list[3].levelname == 'DEBUG'
assert log_list[3].message.startswith('Debug message')
assert log_list[3].origin == 'astropy.tests.test_logger'
def test_log_to_list_level():
with log.log_to_list(filter_level='ERROR') as log_list:
log.error("Error message")
log.warning("Warning message")
assert len(log_list) == 1 and log_list[0].levelname == 'ERROR'
def test_log_to_list_origin1():
with log.log_to_list(filter_origin='astropy.tests') as log_list:
log.error("Error message")
log.warning("Warning message")
assert len(log_list) == 2
def test_log_to_list_origin2():
with log.log_to_list(filter_origin='astropy.wcs') as log_list:
log.error("Error message")
log.warning("Warning message")
assert len(log_list) == 0
@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR'])
def test_log_to_file(tmpdir, level):
local_path = tmpdir.join('test.log')
log_file = local_path.open('wb')
log_path = str(local_path.realpath())
orig_level = log.level
try:
if level is not None:
log.setLevel(level)
with log.log_to_file(log_path):
log.error("Error message")
log.warning("Warning message")
log.info("Information message")
log.debug("Debug message")
log_file.close()
finally:
log.setLevel(orig_level)
log_file = local_path.open('rb')
log_entries = log_file.readlines()
log_file.close()
if level is None:
# The log level *should* be set to whatever it was in the config
level = conf.log_level
# Check list length
if level == 'DEBUG':
assert len(log_entries) == 4
elif level == 'INFO':
assert len(log_entries) == 3
elif level == 'WARN':
assert len(log_entries) == 2
elif level == 'ERROR':
assert len(log_entries) == 1
# Check list content
assert eval(log_entries[0].strip())[-3:] == (
'astropy.tests.test_logger', 'ERROR', 'Error message')
if len(log_entries) >= 2:
assert eval(log_entries[1].strip())[-3:] == (
'astropy.tests.test_logger', 'WARNING', 'Warning message')
if len(log_entries) >= 3:
assert eval(log_entries[2].strip())[-3:] == (
'astropy.tests.test_logger', 'INFO', 'Information message')
if len(log_entries) >= 4:
assert eval(log_entries[3].strip())[-3:] == (
'astropy.tests.test_logger', 'DEBUG', 'Debug message')
def test_log_to_file_level(tmpdir):
local_path = tmpdir.join('test.log')
log_file = local_path.open('wb')
log_path = str(local_path.realpath())
with log.log_to_file(log_path, filter_level='ERROR'):
log.error("Error message")
log.warning("Warning message")
log_file.close()
log_file = local_path.open('rb')
log_entries = log_file.readlines()
log_file.close()
assert len(log_entries) == 1
assert eval(log_entries[0].strip())[-2:] == (
'ERROR', 'Error message')
def test_log_to_file_origin1(tmpdir):
local_path = tmpdir.join('test.log')
log_file = local_path.open('wb')
log_path = str(local_path.realpath())
with log.log_to_file(log_path, filter_origin='astropy.tests'):
log.error("Error message")
log.warning("Warning message")
log_file.close()
log_file = local_path.open('rb')
log_entries = log_file.readlines()
log_file.close()
assert len(log_entries) == 2
def test_log_to_file_origin2(tmpdir):
local_path = tmpdir.join('test.log')
log_file = local_path.open('wb')
log_path = str(local_path.realpath())
with log.log_to_file(log_path, filter_origin='astropy.wcs'):
log.error("Error message")
log.warning("Warning message")
log_file.close()
log_file = local_path.open('rb')
log_entries = log_file.readlines()
log_file.close()
assert len(log_entries) == 0
|
184bd51daf99db34fd34c06a649931a1d7248bd860dad2bccd4d411f1790b1b2 | """Implements the Astropy TestRunner which is a thin wrapper around py.test."""
import inspect
import os
import glob
import copy
import shlex
import sys
import tempfile
import warnings
import importlib
from collections import OrderedDict
from importlib.util import find_spec
from functools import wraps
from astropy.config.paths import set_temp_config, set_temp_cache
from astropy.utils import find_current_module
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
__all__ = ['TestRunner', 'TestRunnerBase', 'keyword']
class keyword:
"""
    A decorator to mark a method as a keyword argument for the ``TestRunner``.
Parameters
----------
default_value : `object`
The default value for the keyword argument. (Default: `None`)
priority : `int`
        Keyword argument methods are executed in order of descending priority.
"""
def __init__(self, default_value=None, priority=0):
self.default_value = default_value
self.priority = priority
def __call__(self, f):
def keyword(*args, **kwargs):
return f(*args, **kwargs)
keyword._default_value = self.default_value
keyword._priority = self.priority
# Set __doc__ explicitly here rather than using wraps because we want
# to keep the function name as keyword so we can inspect it later.
keyword.__doc__ = f.__doc__
return keyword
class TestRunnerBase:
"""
The base class for the TestRunner.
A test runner can be constructed by creating a subclass of this class and
defining 'keyword' methods. These are methods that have the
    `~astropy.tests.runner.keyword` decorator; they are used to
construct allowed keyword arguments to the
`~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
customization of individual keyword arguments (and associated logic)
without having to re-implement the whole
`~astropy.tests.runner.TestRunnerBase.run_tests` method.
Examples
--------
A simple keyword method::
class MyRunner(TestRunnerBase):
            @keyword('default_value')
def spam(self, spam, kwargs):
\"\"\"
spam : `str`
The parameter description for the run_tests docstring.
\"\"\"
# Return value must be a list with a CLI parameter for pytest.
return ['--spam={}'.format(spam)]
"""
def __init__(self, base_path):
self.base_path = os.path.abspath(base_path)
def __new__(cls, *args, **kwargs):
# Before constructing the class parse all the methods that have been
# decorated with ``keyword``.
# The objective of this method is to construct a default set of keyword
# arguments to the ``run_tests`` method. It does this by inspecting the
# methods of the class for functions with the name ``keyword`` which is
# the name of the decorator wrapping function. Once it has created this
# dictionary, it also formats the docstring of ``run_tests`` to be
# comprised of the docstrings for the ``keyword`` methods.
# To add a keyword argument to the ``run_tests`` method, define a new
# method decorated with ``@keyword`` and with the ``self, name, kwargs``
# signature.
# Get all 'function' members as the wrapped methods are functions
functions = inspect.getmembers(cls, predicate=inspect.isfunction)
# Filter out anything that's not got the name 'keyword'
keywords = filter(lambda func: func[1].__name__ == 'keyword', functions)
# Sort all keywords based on the priority flag.
sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
cls.keywords = OrderedDict()
doc_keywords = ""
for name, func in sorted_keywords:
# Here we test if the function has been overloaded to return
# NotImplemented which is the way to disable arguments on
# subclasses. If it has been disabled we need to remove it from the
# default keywords dict. We do it in the try except block because
# we do not have access to an instance of the class, so this is
# going to error unless the method is just doing `return
# NotImplemented`.
try:
# Second argument is False, as it is normally a bool.
# The other two are placeholders for objects.
if func(None, False, None) is NotImplemented:
continue
except Exception:
pass
# Construct the default kwargs dict and docstring
cls.keywords[name] = func._default_value
if func.__doc__:
doc_keywords += ' '*8
doc_keywords += func.__doc__.strip()
doc_keywords += '\n\n'
cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
return super().__new__(cls)
def _generate_args(self, **kwargs):
# Update default values with passed kwargs
# but don't modify the defaults
keywords = copy.deepcopy(self.keywords)
keywords.update(kwargs)
# Iterate through the keywords (in order of priority)
args = []
for keyword in keywords.keys():
func = getattr(self, keyword)
result = func(keywords[keyword], keywords)
# Allow disabling of options in a subclass
if result is NotImplemented:
raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword))
# keyword methods must return a list
if not isinstance(result, list):
raise TypeError("{} keyword method must return a list".format(keyword))
args += result
return args
RUN_TESTS_DOCSTRING = \
"""
Run the tests for the package.
This method builds arguments for and then calls ``pytest.main``.
Parameters
----------
{keywords}
"""
    _required_dependencies = ['pytest', 'pytest_remotedata', 'pytest_doctestplus']
    _missing_dependency_error = "Test dependencies are missing. You should install the 'pytest-astropy' package."
@classmethod
def _has_test_dependencies(cls): # pragma: no cover
# Using the test runner will not work without these dependencies, but
# pytest-openfiles is optional, so it's not listed here.
        for module in cls._required_dependencies:
spec = find_spec(module)
# Checking loader accounts for packages that were uninstalled
if spec is None or spec.loader is None:
                raise RuntimeError(cls._missing_dependency_error)
def run_tests(self, **kwargs):
# The following option will include eggs inside a .eggs folder in
        # sys.path when running the tests. This makes it possible, when
        # running ``python setup.py test``, for test dependencies installed
        # via e.g. ``tests_require`` to be available here. This is not an
        # advertised option
# since it is only for internal use
if kwargs.pop('add_local_eggs_to_path', False):
# Add each egg to sys.path individually
for egg in glob.glob(os.path.join('.eggs', '*.egg')):
sys.path.insert(0, egg)
# We now need to force reload pkg_resources in case any pytest
# plugins were added above, so that their entry points are picked up
import pkg_resources
importlib.reload(pkg_resources)
self._has_test_dependencies() # pragma: no cover
# The docstring for this method is defined as a class variable.
# This allows it to be built for each subclass in __new__.
# Don't import pytest until it's actually needed to run the tests
import pytest
# Raise error for undefined kwargs
allowed_kwargs = set(self.keywords.keys())
passed_kwargs = set(kwargs.keys())
if not passed_kwargs.issubset(allowed_kwargs):
wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0]))
args = self._generate_args(**kwargs)
if kwargs.get('plugins', None) is not None:
plugins = kwargs.pop('plugins')
elif self.keywords.get('plugins', None) is not None:
plugins = self.keywords['plugins']
else:
plugins = []
# override the config locations to not make a new directory nor use
# existing cache or config
astropy_config = tempfile.mkdtemp('astropy_config')
astropy_cache = tempfile.mkdtemp('astropy_cache')
# Have to use nested with statements for cross-Python support
# Note, using these context managers here is superfluous if the
# config_dir or cache_dir options to py.test are in use, but it's
# also harmless to nest the contexts
with set_temp_config(astropy_config, delete=True):
with set_temp_cache(astropy_cache, delete=True):
return pytest.main(args=args, plugins=plugins)
@classmethod
def make_test_runner_in(cls, path):
"""
Constructs a `TestRunner` to run in the given path, and returns a
``test()`` function which takes the same arguments as
`TestRunner.run_tests`.
The returned ``test()`` function will be defined in the module this
was called from. This is used to implement the ``astropy.test()``
function (or the equivalent for affiliated packages).
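
        A minimal sketch of the intended use (``mypackage`` is a
        hypothetical package name)::

            # mypackage/__init__.py
            import os
            from astropy.tests.runner import TestRunner
            test = TestRunner.make_test_runner_in(os.path.dirname(__file__))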
"""
runner = cls(path)
@wraps(runner.run_tests, ('__doc__',))
def test(**kwargs):
return runner.run_tests(**kwargs)
module = find_current_module(2)
if module is not None:
test.__module__ = module.__name__
# A somewhat unusual hack, but delete the attached __wrapped__
# attribute--although this is normally used to tell if the function
# was wrapped with wraps, on some version of Python this is also
# used to determine the signature to display in help() which is
# not useful in this case. We don't really care in this case if the
# function was wrapped either
if hasattr(test, '__wrapped__'):
del test.__wrapped__
test.__test__ = False
return test
class TestRunner(TestRunnerBase):
"""
A test runner for astropy tests
"""
def packages_path(self, packages, base_path, error=None, warning=None):
"""
Generates the path for multiple packages.
Parameters
----------
packages : str
Comma separated string of packages.
base_path : str
Base path to the source code or documentation.
error : str
Error message to be raised as ``ValueError``. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
respectively. No error is raised if `None`. (Default: `None`)
warning : str
Warning message to be issued. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
            respectively. No warning is issued if `None`. (Default: `None`)
Returns
-------
paths : list of str
            List of strings of existing package paths.
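
        Examples
        --------
        A hedged sketch (the package list, base path, and error template
        are illustrative)::

            paths = self.packages_path('io.fits,utils', '/src/astropy',
                                       error='{name} not found at {path}')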
"""
packages = packages.split(",")
paths = []
for package in packages:
path = os.path.join(
base_path, package.replace('.', os.path.sep))
if not os.path.isdir(path):
info = {'name': package, 'path': path}
if error is not None:
raise ValueError(error.format(**info))
if warning is not None:
warnings.warn(warning.format(**info))
else:
paths.append(path)
return paths
# Increase priority so this warning is displayed first.
@keyword(priority=1000)
def coverage(self, coverage, kwargs):
if coverage:
warnings.warn(
"The coverage option is ignored on run_tests, since it "
"can not be made to work in that context. Use "
"'python setup.py test --coverage' instead.",
AstropyWarning)
return []
# test_path depends on self.package_path so make sure this runs before
# test_path.
@keyword(priority=1)
def package(self, package, kwargs):
"""
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or
'utils'. Accepts comma separated string to specify multiple
packages. If nothing is specified all default tests are run.
"""
if package is None:
self.package_path = [self.base_path]
else:
error_message = ('package to test is not found: {name} '
'(at path {path}).')
self.package_path = self.packages_path(package, self.base_path,
error=error_message)
if not kwargs['test_path']:
return self.package_path
return []
@keyword()
def test_path(self, test_path, kwargs):
"""
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
"""
all_args = []
# Ensure that the package kwarg has been run.
self.package(kwargs['package'], kwargs)
if test_path:
base, ext = os.path.splitext(test_path)
if ext in ('.rst', ''):
if kwargs['docs_path'] is None:
# This shouldn't happen from "python setup.py test"
raise ValueError(
"Can not test .rst files without a docs_path "
"specified.")
abs_docs_path = os.path.abspath(kwargs['docs_path'])
abs_test_path = os.path.abspath(
os.path.join(abs_docs_path, os.pardir, test_path))
common = os.path.commonprefix((abs_docs_path, abs_test_path))
if os.path.exists(abs_test_path) and common == abs_docs_path:
# Turn on the doctest_rst plugin
all_args.append('--doctest-rst')
test_path = abs_test_path
# Check that the extensions are in the path and not at the end to
# support specifying the name of the test, i.e.
# test_quantity.py::test_unit
if not (os.path.isdir(test_path) or ('.py' in test_path or '.rst' in test_path)):
raise ValueError("Test path must be a directory or a path to "
"a .py or .rst file")
return all_args + [test_path]
return []
@keyword()
def args(self, args, kwargs):
"""
args : str, optional
Additional arguments to be passed to ``pytest.main`` in the ``args``
keyword argument.
"""
if args:
return shlex.split(args, posix=not sys.platform.startswith('win'))
return []
@keyword(default_value=['astropy.tests.plugins.display'])
def plugins(self, plugins, kwargs):
"""
plugins : list, optional
Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
argument.
"""
# Plugins are handled independently by `run_tests` so we define this
# keyword just for the docstring
return []
@keyword()
def verbose(self, verbose, kwargs):
"""
verbose : bool, optional
Convenience option to turn on verbose output from py.test. Passing
True is the same as specifying ``-v`` in ``args``.
"""
if verbose:
return ['-v']
return []
@keyword()
def pastebin(self, pastebin, kwargs):
"""
pastebin : ('failed', 'all', None), optional
Convenience option for turning on py.test pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
"""
if pastebin is not None:
if pastebin in ['failed', 'all']:
return ['--pastebin={0}'.format(pastebin)]
else:
raise ValueError("pastebin should be 'failed' or 'all'")
return []
@keyword(default_value='none')
def remote_data(self, remote_data, kwargs):
"""
remote_data : {'none', 'astropy', 'any'}, optional
Controls whether to run tests marked with @pytest.mark.remote_data. This can be
set to run no tests with remote data (``none``), only ones that use
data from http://data.astropy.org (``astropy``), or all tests that
use remote data (``any``). The default is ``none``.
"""
if remote_data is True:
remote_data = 'any'
elif remote_data is False:
remote_data = 'none'
elif remote_data not in ('none', 'astropy', 'any'):
warnings.warn("The remote_data option should be one of "
"none/astropy/any (found {0}). For backward-compatibility, "
"assuming 'any', but you should change the option to be "
"one of the supported ones to avoid issues in "
"future.".format(remote_data),
AstropyDeprecationWarning)
remote_data = 'any'
return ['--remote-data={0}'.format(remote_data)]
@keyword()
def pep8(self, pep8, kwargs):
"""
pep8 : bool, optional
Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
"""
if pep8:
try:
import pytest_pep8 # pylint: disable=W0611
except ImportError:
raise ImportError('PEP8 checking requires pytest-pep8 plugin: '
'http://pypi.python.org/pypi/pytest-pep8')
else:
return ['--pep8', '-k', 'pep8']
return []
@keyword()
def pdb(self, pdb, kwargs):
"""
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``--pdb`` in ``args``.
"""
if pdb:
return ['--pdb']
return []
@keyword()
def open_files(self, open_files, kwargs):
"""
open_files : bool, optional
Fail when any tests leave files open. Off by default, because
this adds extra run time to the test suite. Requires the
``psutil`` package.
"""
if open_files:
if kwargs['parallel'] != 0:
raise SystemError(
"open file detection may not be used in conjunction with "
"parallel testing.")
try:
import psutil # pylint: disable=W0611
except ImportError:
raise SystemError(
"open file detection requested, but psutil package "
"is not installed.")
            print("Checking for unclosed files")
            return ['--open-files']
return []
@keyword(0)
def parallel(self, parallel, kwargs):
"""
parallel : int or 'auto', optional
When provided, run the tests in parallel on the specified
            number of CPUs. If parallel is ``'auto'``, it will use all the
            cores on the machine. Requires the ``pytest-xdist`` plugin.
"""
if parallel != 0:
try:
from xdist import plugin # noqa
except ImportError:
raise SystemError(
"running tests in parallel requires the pytest-xdist package")
return ['-n', str(parallel)]
return []
@keyword()
def docs_path(self, docs_path, kwargs):
"""
docs_path : str, optional
The path to the documentation .rst files.
"""
paths = []
if docs_path is not None and not kwargs['skip_docs']:
if kwargs['package'] is not None:
warning_message = ("Can not test .rst docs for {name}, since "
"docs path ({path}) does not exist.")
paths = self.packages_path(kwargs['package'], docs_path,
warning=warning_message)
elif not kwargs['test_path']:
paths = [docs_path, ]
if len(paths) and not kwargs['test_path']:
paths.append('--doctest-rst')
return paths
@keyword()
def skip_docs(self, skip_docs, kwargs):
"""
skip_docs : `bool`, optional
When `True`, skips running the doctests in the .rst files.
"""
# Skip docs is a bool used by docs_path only.
return []
@keyword()
def repeat(self, repeat, kwargs):
"""
repeat : `int`, optional
If set, specifies how many times each test should be run. This is
useful for diagnosing sporadic failures.
"""
if repeat:
return ['--repeat={0}'.format(repeat)]
return []
# Override run_tests for astropy-specific fixes
def run_tests(self, **kwargs):
# This prevents cyclical import problems that make it
# impossible to test packages that define Table types on their
# own.
from astropy.table import Table # pylint: disable=W0611
return super().run_tests(**kwargs)
|
0f3c1dcf4f7d3cdac627c8b5dcb464309560ead8a1b50ff952aa32ee964b2791 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest` testing framework.
"""
import os
import sys
import types
import pickle
import warnings
import functools
import pytest
try:
# Import pkg_resources to prevent it from issuing warnings upon being
# imported from within py.test. See
# https://github.com/astropy/astropy/pull/537 for a detailed explanation.
import pkg_resources # pylint: disable=W0611
except ImportError:
pass
from astropy.units import allclose as quantity_allclose # noqa
from astropy.utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
# For backward-compatibility with affiliated packages
from .runner import TestRunner # pylint: disable=W0611
__all__ = ['raises', 'enable_deprecations_as_exceptions', 'remote_data',
'treat_deprecations_as_exceptions', 'catch_warnings',
'assert_follows_unicode_guidelines',
'assert_quantity_allclose', 'check_pickling_recovery',
'pickle_protocol', 'generic_recursive_equality_test']
# pytest marker to mark tests which get data from the web
# This is being maintained for backwards compatibility
remote_data = pytest.mark.remote_data
# distutils expects options to be Unicode strings
def _fix_user_options(options):
def to_str_or_none(x):
if x is None:
return None
return str(x)
return [tuple(to_str_or_none(x) for x in y) for y in options]
def _save_coverage(cov, result, rootdir, testing_path):
"""
This method is called after the tests have been run in coverage mode
to cleanup and then save the coverage data and report.
"""
from astropy.utils.console import color_print
if result != 0:
return
# The coverage report includes the full path to the temporary
# directory, so we replace all the paths with the true source
# path. Note that this will not work properly for packages that still
# rely on 2to3.
try:
# Coverage 4.0: _harvest_data has been renamed to get_data, the
# lines dict is private
cov.get_data()
except AttributeError:
# Coverage < 4.0
cov._harvest_data()
lines = cov.data.lines
else:
lines = cov.data._lines
for key in list(lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key),
os.path.realpath(testing_path))
new_path = os.path.abspath(
os.path.join(rootdir, new_path))
lines[new_path] = lines.pop(key)
color_print('Saving coverage data in .coverage...', 'green')
cov.save()
color_print('Saving HTML coverage report in htmlcov...', 'green')
cov.html_report(directory=os.path.join(rootdir, 'htmlcov'))
class raises:
"""
A decorator to mark that a test should raise a given exception.
Use as follows::
@raises(ZeroDivisionError)
def test_foo():
x = 1/0
    This can also be used as a context manager, in which case it is just
    an alias for the ``pytest.raises`` context manager (because the two
    have the same name, this helps avoid confusion by being flexible).
"""
# pep-8 naming exception -- this is a decorator class
def __init__(self, exc):
self._exc = exc
self._ctx = None
def __call__(self, func):
@functools.wraps(func)
def run_raises_test(*args, **kwargs):
pytest.raises(self._exc, func, *args, **kwargs)
return run_raises_test
def __enter__(self):
self._ctx = pytest.raises(self._exc)
return self._ctx.__enter__()
def __exit__(self, *exc_info):
return self._ctx.__exit__(*exc_info)
_deprecations_as_exceptions = False
_include_astropy_deprecations = True
_modules_to_ignore_on_import = set([
'compiler', # A deprecated stdlib module used by py.test
'scipy',
'pygments',
'ipykernel',
'IPython', # deprecation warnings for async and await
'setuptools'])
_warnings_to_ignore_entire_module = set([])
_warnings_to_ignore_by_pyver = {
None: set([ # Python version agnostic
# py.test reads files with the 'U' flag, which is
# deprecated.
r"'U' mode is deprecated",
# https://github.com/astropy/astropy/pull/7372
r"Importing from numpy\.testing\.decorators is deprecated, "
r"import from numpy\.testing instead\.",
# Deprecation warnings ahead of pytest 4.x
r"MarkInfo objects are deprecated"]),
(3, 5): set([
# py.test raised this warning in inspect on Python 3.5.
# See https://github.com/pytest-dev/pytest/pull/1009
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) instead"]),
(3, 6): set([
# inspect raises this slightly different warning on Python 3.6-3.7.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)"]),
(3, 7): set([
# inspect raises this slightly different warning on Python 3.6-3.7.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)",
# Deprecation warning for collections.abc, fixed in Astropy but still
# used in lxml, and maybe others
r"Using or importing the ABCs from 'collections'"])
}
def enable_deprecations_as_exceptions(include_astropy_deprecations=True,
modules_to_ignore_on_import=[],
warnings_to_ignore_entire_module=[],
warnings_to_ignore_by_pyver={}):
"""
Turn on the feature that turns deprecations into exceptions.
Parameters
----------
include_astropy_deprecations : bool
If set to `True`, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also turned into exceptions.
modules_to_ignore_on_import : list of str
List of additional modules that generate deprecation warnings
on import, which are to be ignored. By default, these are already
included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``, and
``setuptools``.
warnings_to_ignore_entire_module : list of str
List of modules with deprecation warnings to ignore completely,
not just during import. If ``include_astropy_deprecations=True``
is given, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also ignored for the modules.
warnings_to_ignore_by_pyver : dict
Dictionary mapping tuple of ``(major, minor)`` Python version to
a list of deprecation warning messages to ignore.
        Python version-agnostic warnings should be mapped to the `None` key.
        This is in addition to those already ignored by default
(see ``_warnings_to_ignore_by_pyver`` values).
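
    Examples
    --------
    A sketch of a typical call from a package's ``conftest.py`` (the module
    name and warning pattern here are illustrative)::

        from astropy.tests.helper import enable_deprecations_as_exceptions
        enable_deprecations_as_exceptions(
            modules_to_ignore_on_import=['h5py'],
            warnings_to_ignore_by_pyver={None: [r'Some deprecated pattern']})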
"""
global _deprecations_as_exceptions
_deprecations_as_exceptions = True
global _include_astropy_deprecations
_include_astropy_deprecations = include_astropy_deprecations
global _modules_to_ignore_on_import
_modules_to_ignore_on_import.update(modules_to_ignore_on_import)
global _warnings_to_ignore_entire_module
_warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module)
global _warnings_to_ignore_by_pyver
for key, val in warnings_to_ignore_by_pyver.items():
if key in _warnings_to_ignore_by_pyver:
_warnings_to_ignore_by_pyver[key].update(val)
else:
_warnings_to_ignore_by_pyver[key] = set(val)
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state. The modules may change during
# this iteration thus we copy the original state to a list to iterate
# on. See https://github.com/astropy/astropy/pull/5513.
for module in list(sys.modules.values()):
# We don't want to deal with six.MovedModules, only "real"
        # modules. FIXME: we no longer use six, so this check may be unnecessary.
if (isinstance(module, types.ModuleType) and
hasattr(module, '__warningregistry__')):
del module.__warningregistry__
if not _deprecations_as_exceptions:
return
warnings.resetwarnings()
# Hide the next couple of DeprecationWarnings
warnings.simplefilter('ignore', DeprecationWarning)
# Here's the wrinkle: a couple of our third-party dependencies
# (py.test and scipy) are still using deprecated features
# themselves, and we'd like to ignore those. Fortunately, those
# show up only at import time, so if we import those things *now*,
# before we turn the warnings into exceptions, we're golden.
for m in _modules_to_ignore_on_import:
try:
__import__(m)
except ImportError:
pass
# Now, start over again with the warning filters
warnings.resetwarnings()
# Now, turn DeprecationWarnings into exceptions
_all_warns = [DeprecationWarning]
# Only turn astropy deprecation warnings into exceptions if requested
if _include_astropy_deprecations:
_all_warns += [AstropyDeprecationWarning,
AstropyPendingDeprecationWarning]
for w in _all_warns:
warnings.filterwarnings("error", ".*", w)
# This ignores all deprecation warnings from given module(s),
# not just on import, for use of Astropy affiliated packages.
for m in _warnings_to_ignore_entire_module:
for w in _all_warns:
warnings.filterwarnings('ignore', category=w, module=m)
for v in _warnings_to_ignore_by_pyver:
if v is None or sys.version_info[:2] == v:
for s in _warnings_to_ignore_by_pyver[v]:
warnings.filterwarnings("ignore", s, DeprecationWarning)
class catch_warnings(warnings.catch_warnings):
"""
A high-powered version of warnings.catch_warnings to use for testing
and to make sure that there is no dependence on the order in which
the tests are run.
This completely blitzes any memory of any warnings that have
appeared before so that all warnings will be caught and displayed.
``*args`` is a set of warning classes to collect. If no arguments are
provided, all warnings are collected.
Use as follows::
with catch_warnings(MyCustomWarning) as w:
do.something.bad()
assert len(w) > 0
"""
def __init__(self, *classes):
super().__init__(record=True)
self.classes = classes
def __enter__(self):
warning_list = super().__enter__()
treat_deprecations_as_exceptions()
if len(self.classes) == 0:
warnings.simplefilter('always')
else:
warnings.simplefilter('ignore')
for cls in self.classes:
warnings.simplefilter('always', cls)
return warning_list
def __exit__(self, type, value, traceback):
treat_deprecations_as_exceptions()
class ignore_warnings(catch_warnings):
"""
This can be used either as a context manager or function decorator to
ignore all warnings that occur within a function or block of code.
An optional category option can be supplied to only ignore warnings of a
certain category or categories (if a list is provided).
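    Use as follows (the decorated test function and the noisy call are
    illustrative)::

        @ignore_warnings(DeprecationWarning)
        def test_quiet():
            ...

        with ignore_warnings():
            do_something_noisy()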
"""
def __init__(self, category=None):
super().__init__()
if isinstance(category, type) and issubclass(category, Warning):
self.category = [category]
else:
self.category = category
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Originally this just reused self, but that doesn't work if the
# function is called more than once so we need to make a new
# context manager instance for each call
with self.__class__(category=self.category):
return func(*args, **kwargs)
return wrapper
def __enter__(self):
retval = super().__enter__()
if self.category is not None:
for category in self.category:
warnings.simplefilter('ignore', category)
else:
warnings.simplefilter('ignore')
return retval
def assert_follows_unicode_guidelines(
x, roundtrip=None):
"""
Test that an object follows our Unicode policy. See
"Unicode guidelines" in the coding guidelines.
Parameters
----------
x : object
The instance to test
roundtrip : module, optional
When provided, this namespace will be used to evaluate
``repr(x)`` and ensure that it roundtrips. It will also
ensure that ``__bytes__(x)`` roundtrip.
If not provided, no roundtrip testing will be performed.
"""
from astropy import conf
with conf.set_temp('unicode_output', False):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
unicode_x.encode('ascii')
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
with conf.set_temp('unicode_output', True):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
@pytest.fixture(params=[0, 1, -1])
def pickle_protocol(request):
"""
Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced).
(Originally from astropy.table.tests.test_pickle)
"""
return request.param
def generic_recursive_equality_test(a, b, class_history):
"""
Check if the attributes of a and b are equal. Then,
check if the attributes of the attributes are equal.
"""
dict_a = a.__dict__
dict_b = b.__dict__
for key in dict_a:
assert key in dict_b,\
"Did not pickle {0}".format(key)
if hasattr(dict_a[key], '__eq__'):
eq = (dict_a[key] == dict_b[key])
if '__iter__' in dir(eq):
eq = (False not in eq)
assert eq, "Value of {0} changed by pickling".format(key)
if hasattr(dict_a[key], '__dict__'):
if dict_a[key].__class__ in class_history:
# attempt to prevent infinite recursion
pass
else:
new_class_history = [dict_a[key].__class__]
new_class_history.extend(class_history)
generic_recursive_equality_test(dict_a[key],
dict_b[key],
new_class_history)
def check_pickling_recovery(original, protocol):
"""
Try to pickle an object. If successful, make sure
the object's attributes survived pickling and unpickling.
"""
f = pickle.dumps(original, protocol=protocol)
unpickled = pickle.loads(f)
class_history = [original.__class__]
generic_recursive_equality_test(original, unpickled,
class_history)
def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None,
**kwargs):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`.
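
    Examples
    --------
    Quantities in different but convertible units compare as expected::

        >>> from astropy import units as u
        >>> assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm)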
"""
import numpy as np
from astropy.units.quantity import _unquantify_allclose_arguments
np.testing.assert_allclose(*_unquantify_allclose_arguments(
actual, desired, rtol, atol), **kwargs)
|
8a23eaf7dc84e8c5e6467d2ecdb8761553659a686d12ae3bca3b900821493149 | import matplotlib
from matplotlib import pyplot as plt
from astropy.utils.decorators import wraps
MPL_VERSION = matplotlib.__version__
ROOT = "http://{server}/testing/astropy/2018-10-24T12:38:34.134556/{mpl_version}/"
IMAGE_REFERENCE_DIR = (ROOT.format(server='data.astropy.org', mpl_version=MPL_VERSION[:3] + '.x') + ',' +
ROOT.format(server='www.astropy.org/astropy-data', mpl_version=MPL_VERSION[:3] + '.x'))
def ignore_matplotlibrc(func):
# This is a decorator for tests that use matplotlib but not pytest-mpl
# (which already handles rcParams)
@wraps(func)
def wrapper(*args, **kwargs):
with plt.style.context({}, after_reset=True):
return func(*args, **kwargs)
return wrapper
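# A hedged usage sketch (the test function below is illustrative):
#
#     @ignore_matplotlibrc
#     def test_simple_plot():
#         plt.plot([1, 2, 3])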
|
ebed5b2fa26c77c439001699bb88077063f218f052f8aca3deedbf7791713430 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is retained only for backwards compatibility. Affiliated packages
should no longer import ``disable_internet`` from ``astropy.tests``. It is
now available from ``pytest_remotedata``. However, this is not the
recommended mechanism for controlling access to remote data in tests.
Instead, packages should make use of decorators provided by the
pytest_remotedata plugin:

- ``@pytest.mark.remote_data`` for tests that require remote data access
- ``@pytest.mark.internet_off`` for tests that should only run when remote
  data access is disabled

Remote data access for the test suite is controlled by the ``--remote-data``
command line flag. This is either passed to ``pytest`` directly or to the
``setup.py test`` command.
TODO: This module should eventually be removed once backwards compatibility
is no longer supported.
"""
from warnings import warn
from astropy.utils.exceptions import AstropyDeprecationWarning
warn("The ``disable_internet`` module is no longer provided by astropy. It "
"is now available as ``pytest_remotedata.disable_internet``. However, "
"developers are encouraged to avoid using this module directly. See "
"<https://docs.astropy.org/en/latest/whatsnew/3.0.html#pytest-plugins> "
"for more information.", AstropyDeprecationWarning)
try:
# This should only be necessary during testing, in which case the test
# package must be installed anyway.
from pytest_remotedata.disable_internet import *
except ImportError:
pass
|
5d3b1c4f6aeaba3a915df4a728641b69420bdffb74f774b4abc5c8d969390aa6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from astropy.table import groups, QTable, Table
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
__all__ = ['TimeSeries']
@autocheck_required_columns
class TimeSeries(BaseTimeSeries):
"""
A class to represent time series data in tabular form.
`~astropy.timeseries.TimeSeries` provides a class for representing time
series as a collection of values of different quantities measured at specific
points in time (for time series with finite time bins, see the
`~astropy.timeseries.BinnedTimeSeries` class).
`~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable`
    and thus provides all the standard table manipulation methods available to
tables, but it also provides additional conveniences for dealing with time
series, such as a flexible initializer for setting up the times, a method
for folding time series, and a ``time`` attribute for easy access to the
time values.
See also: http://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
Data to initialize time series. This does not need to contain the times,
which can be provided separately, but if it does contain the times they
should be in a column called ``'time'`` to be automatically recognized.
time : `~astropy.time.Time` or iterable
The times at which the values are sampled - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. If this is given, then
the remaining time-related arguments should not be used.
time_start : `~astropy.time.Time` or str
The time of the first sample in the time series. This is an alternative
to providing ``time`` and requires that ``time_delta`` is also provided.
time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity`
The step size in time for the series. This can either be a scalar if
the time series is evenly sampled, or an array of values if it is not.
n_samples : int
The number of time samples for the series. This is only used if both
``time_start`` and ``time_delta`` are provided and are scalar values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
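
    Examples
    --------
    A minimal construction sketch (the values are illustrative)::

        >>> from astropy import units as u
        >>> from astropy.timeseries import TimeSeries
        >>> ts = TimeSeries(time_start='2016-03-22T12:30:31',
        ...                 time_delta=3 * u.s, n_samples=5)
        >>> ts['flux'] = [1., 4., 5., 3., 2.] * u.mJy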
"""
_required_columns = ['time']
def __init__(self, data=None, *, time=None, time_start=None,
time_delta=None, n_samples=None, **kwargs):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if data is None and time is None and time_start is None and time_delta is None:
self._required_columns_relax = True
return
# First if time has been given in the table data, we should extract it
# and treat it as if it had been passed as a keyword argument.
if data is not None:
if n_samples is not None:
if n_samples != len(self):
raise TypeError("'n_samples' has been given both and it is not the "
"same length as the input data.")
else:
n_samples = len(self)
if 'time' in self.colnames:
if time is None:
time = self.columns['time']
else:
raise TypeError("'time' has been given both in the table and as a keyword argument")
if time is None and time_start is None:
raise TypeError("Either 'time' or 'time_start' should be specified")
elif time is not None and time_start is not None:
raise TypeError("Cannot specify both 'time' and 'time_start'")
if time is not None and not isinstance(time, Time):
time = Time(time)
if time_start is not None and not isinstance(time_start, Time):
time_start = Time(time_start)
if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)):
raise TypeError("'time_delta' should be a Quantity or a TimeDelta")
if isinstance(time_delta, TimeDelta):
time_delta = time_delta.sec * u.s
if time_start is not None:
# We interpret this as meaning that time is that of the first
# sample and that the interval is given by time_delta.
if time_delta is None:
raise TypeError("'time' is scalar, so 'time_delta' is required")
if time_delta.isscalar:
time_delta = np.repeat(time_delta, n_samples)
time_delta = np.cumsum(time_delta)
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0. * u.s
time = time_start + time_delta
elif len(self.colnames) > 0 and len(time) != len(self):
raise ValueError("Length of 'time' ({0}) should match "
"data length ({1})".format(len(time), n_samples))
elif time_delta is not None:
raise TypeError("'time_delta' should not be specified since "
"'time' is an array")
with self._delay_required_column_checks():
if 'time' in self.colnames:
self.remove_column('time')
self.add_column(time, index=0, name='time')
@property
def time(self):
"""
The time values.
"""
return self['time']
def fold(self, period=None, midpoint_epoch=None):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
midpoint epoch.
Parameters
----------
period : `~astropy.units.Quantity`
The period to use for folding
midpoint_epoch : `~astropy.time.Time`
The time to use as the midpoint epoch, at which the relative
time offset will be 0. Defaults to the first time in the time
series.
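
        Examples
        --------
        A hedged sketch, folding an existing series ``ts`` on an assumed
        period::

            >>> folded = ts.fold(period=3.2 * u.day)  # doctest: +SKIP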
"""
folded = self.copy()
if midpoint_epoch is None:
midpoint_epoch = self.time[0]
else:
midpoint_epoch = Time(midpoint_epoch)
period_sec = period.to_value(u.s)
relative_time_sec = ((self.time - midpoint_epoch).sec + period_sec / 2) % period_sec - period_sec / 2
folded_time = TimeDelta(relative_time_sec * u.s)
with folded._delay_required_column_checks():
folded.remove_column('time')
folded.add_column(folded_time, name='time', index=0)
return folded
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if 'time' not in item:
out = QTable([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
return super().__getitem__(item)
def add_columns(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_columns`.
"""
# Note that the docstring is inherited from QTable
result = super().add_columns(*args, **kwargs)
if len(self.indices) == 0 and 'time' in self.colnames:
self.add_index('time')
return result
@classmethod
def from_pandas(self, df, time_scale='utc'):
"""
Convert a :class:`~pandas.DataFrame` to a
:class:`astropy.timeseries.TimeSeries`.
Parameters
----------
df : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance.
time_scale : str
The time scale to pass into `astropy.time.Time`.
Defaults to ``UTC``.
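
        Examples
        --------
        A hedged sketch (the DataFrame contents are illustrative; the index
        must be a :class:`~pandas.DatetimeIndex`)::

            >>> import pandas as pd  # doctest: +SKIP
            >>> df = pd.DataFrame({'a': [1, 2]},
            ...                   index=pd.DatetimeIndex(['2019-01-01',
            ...                                           '2019-01-02']))  # doctest: +SKIP
            >>> ts = TimeSeries.from_pandas(df)  # doctest: +SKIP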
"""
from pandas import DataFrame, DatetimeIndex
if not isinstance(df, DataFrame):
raise TypeError("Input should be a pandas DataFrame")
if not isinstance(df.index, DatetimeIndex):
raise TypeError("DataFrame does not have a DatetimeIndex")
time = Time(df.index, scale=time_scale)
table = Table.from_pandas(df)
return TimeSeries(time=time, data=table)
def to_pandas(self):
"""
Convert this :class:`~astropy.timeseries.TimeSeries` to a
:class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
"""
return Table(self).to_pandas(index='time')
@classmethod
def read(self, filename, time_column=None, time_format=None, time_scale=None, format=None, *args, **kwargs):
"""
        Read and parse a file and return a `astropy.timeseries.TimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(http://docs.astropy.org/en/stable/io/unified.html). By default, this
        method will try to use readers defined specifically for the
`astropy.timeseries.TimeSeries` class - however, it is also
possible to use the ``format`` keyword to specify formats defined for
the `astropy.table.Table` class - in this case, you will need to also
        provide the name of the column containing the times, as well as any
        other time-related options (see the Parameters section below
for details)::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv',
... time_column='date') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_column : str, optional
The name of the time column.
time_format : str, optional
The time format for the time column.
time_scale : str, optional
The time scale for the time column.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.sampled.TimeSeries`
TimeSeries corresponding to file contents.
"""
try:
            # First we try the readers defined for the TimeSeries class
return super().read(filename, format=format, *args, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_column is None:
raise ValueError("``time_column`` should be provided since the default Table readers are being used.")
table = Table.read(filename, format=format, *args, **kwargs)
if time_column in table.colnames:
time = Time(table.columns[time_column], scale=time_scale, format=time_format)
table.remove_column(time_column)
else:
raise ValueError("Time column '{}' not found in the input data.".format(time_column))
return TimeSeries(time=time, data=table)
|
973f9d047289fd739784f3f213edf30677538e76bcfa5ba93ef230d77dd026d0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
from astropy.timeseries.sampled import TimeSeries
from astropy.timeseries.binned import BinnedTimeSeries
__all__ = ['aggregate_downsample']
def reduceat(array, indices, function):
"""
    Manual ``reduceat`` functionality for cases where a function does not
    provide its own ``reduceat``. If the input function has a ``reduceat``
    attribute, that is used instead.
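    For example, ``reduceat(a, [0, 3], np.add)`` matches
    ``np.add.reduceat(a, [0, 3])``, while ``reduceat(a, [0, 3], np.nanmean)``
    falls back to the manual loop below.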
"""
if hasattr(function, 'reduceat'):
return np.array(function.reduceat(array, indices))
else:
result = []
for i in range(len(indices) - 1):
if indices[i+1] <= indices[i]+1:
result.append(function(array[indices[i]]))
else:
result.append(function(array[indices[i]:indices[i+1]]))
result.append(function(array[indices[-1]:]))
return np.array(result)
def aggregate_downsample(time_series, *, time_bin_size=None, time_bin_start=None,
n_bins=None, aggregate_func=None):
"""
Downsample a time series by binning values into bins with a fixed size,
using a single function to combine the values in the bin.
Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
The time series to downsample.
time_bin_size : `~astropy.units.Quantity`
The time interval for the binned time series.
time_bin_start : `~astropy.time.Time`, optional
The start time for the binned time series. Defaults to the first
time in the sampled time series.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
The downsampled time series.
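
    Examples
    --------
    A hedged sketch, binning into 10-second bins (``ts`` is assumed to be
    an existing `~astropy.timeseries.TimeSeries` with a ``'flux'`` column)::

        >>> from astropy import units as u
        >>> from astropy.timeseries import aggregate_downsample
        >>> binned = aggregate_downsample(ts, time_bin_size=10 * u.s)  # doctest: +SKIP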
"""
if not isinstance(time_series, TimeSeries):
raise TypeError("time_series should be a TimeSeries")
if not isinstance(time_bin_size, u.Quantity):
raise TypeError("time_bin_size should be a astropy.unit quantity")
bin_size_sec = time_bin_size.to_value(u.s)
# Use the table sorted by time
    ts_sorted = time_series.iloc[:]
# Determine start time if needed
if time_bin_start is None:
        time_bin_start = ts_sorted.time[0]
# Find the relative time since the start time, in seconds
    relative_time_sec = (ts_sorted.time - time_bin_start).sec
# Determine the number of bins if needed
if n_bins is None:
n_bins = int(np.ceil(relative_time_sec[-1] / bin_size_sec))
if aggregate_func is None:
aggregate_func = np.nanmean
# Determine the bins
relative_bins_sec = np.cumsum(np.hstack([0, np.repeat(bin_size_sec, n_bins)]))
bins = time_bin_start + relative_bins_sec * u.s
# Find the subset of the table that is inside the bins
keep = ((relative_time_sec >= relative_bins_sec[0]) &
(relative_time_sec < relative_bins_sec[-1]))
    subset = ts_sorted[keep]
# Figure out which bin each row falls in - the -1 is because items
    # falling in the first bin will have index 1, but we want that to be 0
indices = np.searchsorted(relative_bins_sec, relative_time_sec[keep]) - 1
    # Rows exactly at the first bin edge get index -1 above; put them in bin 0.
indices[relative_time_sec[keep] == relative_bins_sec[0]] = 0
# Create new binned time series
binned = BinnedTimeSeries(time_bin_start=bins[:-1], time_bin_end=bins[-1])
# Determine rows where values are defined
groups = np.hstack([0, np.nonzero(np.diff(indices))[0] + 1])
# Find unique indices to determine which rows in the final time series
# will not be empty.
unique_indices = np.unique(indices)
# Add back columns
for colname in subset.colnames:
if colname == 'time':
continue
values = subset[colname]
# FIXME: figure out how to avoid the following, if possible
if not isinstance(values, (np.ndarray, u.Quantity)):
warnings.warn("Skipping column {0} since it has a mix-in type", AstropyUserWarning)
continue
if isinstance(values, u.Quantity):
data = u.Quantity(np.repeat(np.nan, n_bins), unit=values.unit)
data[unique_indices] = u.Quantity(reduceat(values.value, groups, aggregate_func),
values.unit, copy=False)
else:
data = np.ma.zeros(n_bins, dtype=values.dtype)
data.mask = 1
data[unique_indices] = reduceat(values, groups, aggregate_func)
data.mask[unique_indices] = 0
binned[colname] = data
return binned
|
423e7c7730cf4e9586b1bf2b9444aa85746f642d729c50323799f38dd9c59605 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from types import FunctionType
from contextlib import contextmanager
from functools import wraps
from astropy.table import QTable
__all__ = ['BaseTimeSeries', 'autocheck_required_columns']
COLUMN_RELATED_METHODS = ['add_column',
'add_columns',
'keep_columns',
'remove_column',
'remove_columns',
'rename_column']
def autocheck_required_columns(cls):
"""
    This is a decorator that ensures that the table contains the specific
    columns indicated by the ``_required_columns`` attribute. It wraps all
    methods that might affect the columns in the table (those listed in
    ``COLUMN_RELATED_METHODS``) and checks for consistency after each of
    them has been run.
"""
def decorator_method(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
result = method(self, *args, **kwargs)
self._check_required_columns()
return result
return wrapper
for name in COLUMN_RELATED_METHODS:
if (not hasattr(cls, name) or
not isinstance(getattr(cls, name), FunctionType)):
raise ValueError("{0} is not a valid method".format(name))
setattr(cls, name, decorator_method(getattr(cls, name)))
return cls
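# A minimal usage sketch (the class and column names below are illustrative):
#
#     @autocheck_required_columns
#     class ExampleSeries(BaseTimeSeries):
#         _required_columns = ['time']
#
#     ts = ExampleSeries()
#     ts.add_column([1, 2, 3], name='flux')
#
# The decorated ``add_column`` triggers ``_check_required_columns``, which
# raises a ValueError because 'time' has not been added as the first column.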
class BaseTimeSeries(QTable):
_required_columns = None
_required_columns_enabled = True
    # If _required_columns_relax is True, we don't require the columns to be
# present but we do require them to be the correct ones IF present. Note
# that this is a temporary state - as soon as the required columns
# are all present, we toggle this to False
_required_columns_relax = False
def _check_required_columns(self):
if not self._required_columns_enabled:
return
if self._required_columns is not None:
if self._required_columns_relax:
required_columns = self._required_columns[:len(self.colnames)]
else:
required_columns = self._required_columns
plural = 's' if len(required_columns) > 1 else ''
if not self._required_columns_relax and len(self.colnames) == 0:
raise ValueError("{0} object is invalid - expected '{1}' "
"as the first column{2} but time series has no columns"
.format(self.__class__.__name__, required_columns[0], plural))
elif self.colnames[:len(required_columns)] != required_columns:
raise ValueError("{0} object is invalid - expected '{1}' "
"as the first column{2} but found '{3}'"
.format(self.__class__.__name__, required_columns[0], plural, self.colnames[0]))
if (self._required_columns_relax
and self._required_columns == self.colnames[:len(self._required_columns)]):
self._required_columns_relax = False
    @contextmanager
    def _delay_required_column_checks(self):
        self._required_columns_enabled = False
        # Use try/finally so that checks are re-enabled even if the
        # delayed block raises.
        try:
            yield
        finally:
            self._required_columns_enabled = True
        self._check_required_columns()
|
a8964f8e34e91fa9e0e969725bce04f4790207ce8c853bf0a950f34ee8e4122c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from astropy.table import groups, Table, QTable
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.misc import InheritDocstrings
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
__all__ = ['BinnedTimeSeries']
@autocheck_required_columns
class BinnedTimeSeries(BaseTimeSeries, metaclass=InheritDocstrings):
"""
A class to represent binned time series data in tabular form.
`~astropy.timeseries.BinnedTimeSeries` provides a class for representing
time series as a collection of values of different quantities measured in
time bins (for time series with values sampled at specific times, see the
`~astropy.timeseries.TimeSeries` class).
`~astropy.timeseries.BinnedTimeSeries` is a sub-class of `~astropy.table.QTable`
    and thus provides all the standard table manipulation methods available to
tables, but it also provides additional conveniences for dealing with time
series, such as a flexible initializer for setting up the times, and
attributes to access the start/center/end time of bins.
See also: http://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize time series. This does not need to contain the times,
which can be provided separately, but if it does contain the times they
should be in columns called ``'time_bin_start'`` and ``'time_bin_size'``
to be automatically recognized.
time_bin_start : `~astropy.time.Time` or iterable
The times of the start of each bin - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. If this is given, then
the remaining time-related arguments should not be used. This can also
be a scalar value if ``time_bin_size`` is provided.
time_bin_end : `~astropy.time.Time` or iterable
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any value or iterable that
initializes the `~astropy.time.Time` class. If this is given, then the
remaining time-related arguments should not be used. This can only be
given if ``time_bin_start`` is an array of values. If ``time_bin_end``
is a scalar, time bins are assumed to be contiguous, such that the end
of each bin is the start of the next one, and ``time_bin_end`` gives the
end time for the last bin. If ``time_bin_end`` is an array, the time
bins do not need to be contiguous. If this argument is provided,
``time_bin_size`` should not be provided.
time_bin_size : `~astropy.time.TimeDelta` or `~astropy.units.Quantity`
The size of the time bins, either as a scalar value (in which case all
time bins will be assumed to have the same duration) or as an array of
values (in which case each time bin can have a different duration).
If this argument is provided, ``time_bin_end`` should not be provided.
n_bins : int
The number of time bins for the series. This is only used if both
``time_bin_start`` and ``time_bin_size`` are provided and are scalar
values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
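    Examples
    --------
    A minimal construction sketch (the start time and bin size are
    illustrative)::
        >>> from astropy import units as u
        >>> ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
        ...                       time_bin_size=3 * u.s, n_bins=3)  # doctest: +SKIP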
"""
_required_columns = ['time_bin_start', 'time_bin_size']
def __init__(self, data=None, *, time_bin_start=None, time_bin_end=None,
time_bin_size=None, n_bins=None, **kwargs):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if (data is None and time_bin_start is None and time_bin_end is None and
time_bin_size is None and n_bins is None):
self._required_columns_relax = True
return
# First if time_bin_start and time_bin_end have been given in the table data, we
# should extract them and treat them as if they had been passed as
# keyword arguments.
if 'time_bin_start' in self.colnames:
if time_bin_start is None:
time_bin_start = self.columns['time_bin_start']
else:
raise TypeError("'time_bin_start' has been given both in the table "
"and as a keyword argument")
if 'time_bin_size' in self.colnames:
if time_bin_size is None:
time_bin_size = self.columns['time_bin_size']
else:
raise TypeError("'time_bin_size' has been given both in the table "
"and as a keyword argument")
if time_bin_start is None:
raise TypeError("'time_bin_start' has not been specified")
if time_bin_end is None and time_bin_size is None:
raise TypeError("Either 'time_bin_size' or 'time_bin_end' should be specified")
if not isinstance(time_bin_start, Time):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, Time):
time_bin_end = Time(time_bin_end)
if time_bin_size is not None and not isinstance(time_bin_size, (Quantity, TimeDelta)):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if isinstance(time_bin_size, TimeDelta):
time_bin_size = time_bin_size.sec * u.s
if time_bin_start.isscalar:
# We interpret this as meaning that this is the start of the
# first bin and that the bins are contiguous. In this case,
# we require time_bin_size to be specified.
if time_bin_size is None:
raise TypeError("'time_bin_start' is scalar, so 'time_bin_size' is required")
if time_bin_size.isscalar:
if data is not None:
if n_bins is not None:
if n_bins != len(self):
raise TypeError("'n_bins' has been given and it is not the "
"same length as the input data.")
else:
n_bins = len(self)
time_bin_size = np.repeat(time_bin_size, n_bins)
time_delta = np.cumsum(time_bin_size)
time_bin_end = time_bin_start + time_delta
# Now shift the array so that the first entry is 0
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0. * u.s
# Make time_bin_start into an array
time_bin_start = time_bin_start + time_delta
else:
if len(self.colnames) > 0 and len(time_bin_start) != len(self):
raise ValueError("Length of 'time_bin_start' ({0}) should match "
"table length ({1})".format(len(time_bin_start), len(self)))
if time_bin_end is not None:
if time_bin_end.isscalar:
times = time_bin_start.copy()
times[:-1] = times[1:]
times[-1] = time_bin_end
time_bin_end = times
time_bin_size = (time_bin_end - time_bin_start).sec * u.s
if time_bin_size.isscalar:
time_bin_size = np.repeat(time_bin_size, len(self))
with self._delay_required_column_checks():
if 'time_bin_start' in self.colnames:
self.remove_column('time_bin_start')
if 'time_bin_size' in self.colnames:
self.remove_column('time_bin_size')
self.add_column(time_bin_start, index=0, name='time_bin_start')
self.add_index('time_bin_start')
self.add_column(time_bin_size, index=1, name='time_bin_size')
@property
def time_bin_start(self):
"""
The start times of all the time bins.
"""
return self['time_bin_start']
@property
def time_bin_center(self):
"""
The center times of all the time bins.
"""
return self['time_bin_start'] + self['time_bin_size'] * 0.5
@property
def time_bin_end(self):
"""
The end times of all the time bins.
"""
return self['time_bin_start'] + self['time_bin_size']
@property
def time_bin_size(self):
"""
The sizes of all the time bins.
"""
return self['time_bin_size']
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if 'time_bin_start' not in item or 'time_bin_size' not in item:
out = QTable([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
return super().__getitem__(item)
@classmethod
    def read(cls, filename, time_bin_start_column=None, time_bin_end_column=None,
time_bin_size_column=None, time_bin_size_unit=None, time_format=None, time_scale=None,
format=None, *args, **kwargs):
"""
        Read and parse a file and return an `astropy.timeseries.BinnedTimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(http://docs.astropy.org/en/stable/io/unified.html). By default, this
method will try and use readers defined specifically for the
`astropy.timeseries.BinnedTimeSeries` class - however, it is also
possible to use the ``format`` keyword to specify formats defined for
the `astropy.table.Table` class - in this case, you will need to also
        provide the name of the column containing the start times for the
bins, as well as other column names (see the Parameters section below
for details)::
>>> from astropy.timeseries.binned import BinnedTimeSeries
>>> ts = BinnedTimeSeries.read('binned.dat', format='ascii.ecsv',
... time_bin_start_column='date_start',
... time_bin_end_column='date_end') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_bin_start_column : str
The name of the column with the start time for each bin.
time_bin_end_column : str, optional
The name of the column with the end time for each bin. Either this
option or ``time_bin_size_column`` should be specified.
time_bin_size_column : str, optional
The name of the column with the size for each bin. Either this
option or ``time_bin_end_column`` should be specified.
time_bin_size_unit : `astropy.units.Unit`, optional
If ``time_bin_size_column`` is specified but does not have a unit
set in the table, you can specify the unit manually.
time_format : str, optional
The time format for the start and end columns.
time_scale : str, optional
The time scale for the start and end columns.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.binned.BinnedTimeSeries`
BinnedTimeSeries corresponding to the file.
"""
try:
# First we try the readers defined for the BinnedTimeSeries class
return super().read(filename, format=format, *args, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_bin_start_column is None:
raise ValueError("``time_bin_start_column`` should be provided since the default Table readers are being used.")
            if time_bin_end_column is None and time_bin_size_column is None:
                raise ValueError("Either ``time_bin_end_column`` or ``time_bin_size_column`` should be provided.")
            elif time_bin_end_column is not None and time_bin_size_column is not None:
                raise ValueError("Cannot specify both ``time_bin_end_column`` and ``time_bin_size_column``.")
table = Table.read(filename, format=format, *args, **kwargs)
if time_bin_start_column in table.colnames:
time_bin_start = Time(table.columns[time_bin_start_column],
scale=time_scale, format=time_format)
table.remove_column(time_bin_start_column)
else:
raise ValueError("Bin start time column '{}' not found in the input data.".format(time_bin_start_column))
if time_bin_end_column is not None:
if time_bin_end_column in table.colnames:
time_bin_end = Time(table.columns[time_bin_end_column],
scale=time_scale, format=time_format)
table.remove_column(time_bin_end_column)
else:
raise ValueError("Bin end time column '{}' not found in the input data.".format(time_bin_end_column))
time_bin_size = None
elif time_bin_size_column is not None:
if time_bin_size_column in table.colnames:
time_bin_size = table.columns[time_bin_size_column]
table.remove_column(time_bin_size_column)
else:
raise ValueError("Bin size column '{}' not found in the input data.".format(time_bin_size_column))
if time_bin_size.unit is None:
if time_bin_size_unit is None or not isinstance(time_bin_size_unit, u.UnitBase):
raise ValueError("The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``.")
time_bin_size = time_bin_size * time_bin_size_unit
else:
time_bin_size = u.Quantity(time_bin_size)
time_bin_end = None
return BinnedTimeSeries(data=table,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
time_bin_size=time_bin_size,
n_bins=len(table))
|
0b76356da658dd45d5ca28757b0800984a3b7a8ed4ca543e6b5d2338c3911ba2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains astronomical and physical constants for use in Astropy or other
places.
A typical use case might be::
>>> from astropy.constants import c, m_e
>>> # ... define the mass of something you want the rest energy of as m ...
>>> m = m_e
>>> E = m * c**2
>>> E.to('MeV') # doctest: +FLOAT_CMP
<Quantity 0.510998927603161 MeV>
"""
import inspect
from contextlib import contextmanager
# Hack to make circular imports with units work
try:
from astropy import units
del units
except ImportError:
pass
from .constant import Constant, EMConstant # noqa
from . import si # noqa
from . import cgs # noqa
from . import codata2014, iau2015 # noqa
from . import utils as _utils
# for updating the constants module docstring
_lines = [
'The following constants are available:\n',
'========== ============== ================ =========================',
' Name Value Unit Description',
'========== ============== ================ =========================',
]
# NOTE: Update this when default changes.
_utils._set_c(codata2014, iau2015, inspect.getmodule(inspect.currentframe()),
not_in_module_only=True, doclines=_lines, set_class=True)
_lines.append(_lines[1])
if __doc__ is not None:
__doc__ += '\n'.join(_lines)
# TODO: Re-implement in a way that is more consistent with astropy.units.
# See https://github.com/astropy/astropy/pull/7008 discussions.
@contextmanager
def set_enabled_constants(modname):
"""
Context manager to temporarily set values in the ``constants``
namespace to an older version.
See :ref:`astropy-constants-prior` for usage.
Parameters
----------
modname : {'astropyconst13'}
Name of the module containing an older version.
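    Examples
    --------
    A minimal usage sketch::
        >>> from astropy import constants as const
        >>> with const.set_enabled_constants('astropyconst13'):  # doctest: +SKIP
        ...     print(const.h)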
"""
# Re-import here because these were deleted from namespace on init.
import inspect
import warnings
from . import utils as _utils
# NOTE: Update this when default changes.
if modname == 'astropyconst13':
from .astropyconst13 import codata2010 as codata
from .astropyconst13 import iau2012 as iaudata
else:
raise ValueError(
'Context manager does not currently handle {}'.format(modname))
module = inspect.getmodule(inspect.currentframe())
# Ignore warnings about "Constant xxx already has a definition..."
with warnings.catch_warnings():
warnings.simplefilter('ignore')
_utils._set_c(codata, iaudata, module,
not_in_module_only=False, set_class=True)
try:
yield
finally:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# NOTE: Update this when default changes.
_utils._set_c(codata2014, iau2015, module,
not_in_module_only=False, set_class=True)
# Clean up namespace
del inspect
del contextmanager
del _utils
del _lines
|
bd0aa72b93a18e9ef0afb415867b17d511f44735033d85da7d51c821be648a0c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import types
import warnings
import numpy as np
from astropy.units.core import Unit, UnitsError
from astropy.units.quantity import Quantity
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import InheritDocstrings
__all__ = ['Constant', 'EMConstant']
class ConstantMeta(InheritDocstrings):
"""Metaclass for the :class:`Constant`. The primary purpose of this is to
wrap the double-underscore methods of :class:`Quantity` which is the
superclass of :class:`Constant`.
    In particular this wraps the operator overloads such as `__add__` to
    prevent constants such as ``e`` from being used in expressions without
    specifying a system. The wrapper checks to see if the constant is listed
    (by name) in ``Constant._has_incompatible_units``, a set of those
    constants whose definitions in different systems of units are physically
    incompatible. It also performs this check on each `Constant` if
it hasn't already been performed (the check is deferred until the
`Constant` is actually used in an expression to speed up import times,
among other reasons).
"""
def __new__(mcls, name, bases, d):
def wrap(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
name_lower = self.name.lower()
instances = self._registry[name_lower]
if not self._checked_units:
for inst in instances.values():
try:
self.unit.to(inst.unit)
except UnitsError:
self._has_incompatible_units.add(name_lower)
self._checked_units = True
if (not self.system and
name_lower in self._has_incompatible_units):
systems = sorted([x for x in instances if x])
raise TypeError(
'Constant {0!r} does not have physically compatible '
'units across all systems of units and cannot be '
'combined with other values without specifying a '
'system (eg. {1}.{2})'.format(self.abbrev, self.abbrev,
systems[0]))
return meth(self, *args, **kwargs)
return wrapper
# The wrapper applies to so many of the __ methods that it's easier to
# just exclude the ones it doesn't apply to
exclude = set(['__new__', '__array_finalize__', '__array_wrap__',
'__dir__', '__getattr__', '__init__', '__str__',
'__repr__', '__hash__', '__iter__', '__getitem__',
'__len__', '__bool__', '__quantity_subclass__'])
for attr, value in vars(Quantity).items():
if (isinstance(value, types.FunctionType) and
attr.startswith('__') and attr.endswith('__') and
attr not in exclude):
d[attr] = wrap(value)
return super().__new__(mcls, name, bases, d)
class Constant(Quantity, metaclass=ConstantMeta):
"""A physical or astronomical constant.
These objects are quantities that are meant to represent physical
constants.
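    Examples
    --------
    A minimal construction sketch (the values below are illustrative, not an
    official definition)::
        >>> g0 = Constant('g0', 'Standard gravity', 9.80665, 'm / s2',
        ...               0.0, 'CODATA 2014', system='si')  # doctest: +SKIP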
"""
_registry = {}
_has_incompatible_units = set()
def __new__(cls, abbrev, name, value, unit, uncertainty,
reference=None, system=None):
if reference is None:
reference = getattr(cls, 'default_reference', None)
if reference is None:
raise TypeError("{} requires a reference.".format(cls))
name_lower = name.lower()
instances = cls._registry.setdefault(name_lower, {})
# By-pass Quantity initialization, since units may not yet be
# initialized here, and we store the unit in string form.
inst = np.array(value).view(cls)
if system in instances:
warnings.warn('Constant {0!r} already has a definition in the '
'{1!r} system from {2!r} reference'.format(
name, system, reference), AstropyUserWarning)
for c in instances.values():
if system is not None and not hasattr(c.__class__, system):
setattr(c, system, inst)
if c.system is not None and not hasattr(inst.__class__, c.system):
setattr(inst, c.system, c)
instances[system] = inst
inst._abbrev = abbrev
inst._name = name
inst._value = value
inst._unit_string = unit
inst._uncertainty = uncertainty
inst._reference = reference
inst._system = system
inst._checked_units = False
return inst
def __repr__(self):
return ('<{0} name={1!r} value={2} uncertainty={3} unit={4!r} '
'reference={5!r}>'.format(self.__class__, self.name, self.value,
self.uncertainty, str(self.unit),
self.reference))
def __str__(self):
return (' Name = {0}\n'
' Value = {1}\n'
' Uncertainty = {2}\n'
' Unit = {3}\n'
' Reference = {4}'.format(self.name, self.value,
self.uncertainty, self.unit,
self.reference))
def __quantity_subclass__(self, unit):
return super().__quantity_subclass__(unit)[0], False
def copy(self):
"""
Return a copy of this `Constant` instance. Since they are by
definition immutable, this merely returns another reference to
``self``.
"""
return self
__deepcopy__ = __copy__ = copy
@property
def abbrev(self):
"""A typical ASCII text abbreviation of the constant, also generally
the same as the Python variable used for this constant.
"""
return self._abbrev
@property
def name(self):
"""The full name of the constant."""
return self._name
@lazyproperty
def _unit(self):
"""The unit(s) in which this constant is defined."""
return Unit(self._unit_string)
@property
def uncertainty(self):
"""The known uncertainty in this constant's value."""
return self._uncertainty
@property
def reference(self):
"""The source used for the value of this constant."""
return self._reference
@property
def system(self):
"""The system of units in which this constant is defined (typically
`None` so long as the constant's units can be directly converted
between systems).
"""
return self._system
def _instance_or_super(self, key):
instances = self._registry[self.name.lower()]
inst = instances.get(key)
if inst is not None:
return inst
else:
return getattr(super(), key)
@property
def si(self):
"""If the Constant is defined in the SI system return that instance of
the constant, else convert to a Quantity in the appropriate SI units.
"""
return self._instance_or_super('si')
@property
def cgs(self):
"""If the Constant is defined in the CGS system return that instance of
the constant, else convert to a Quantity in the appropriate CGS units.
"""
return self._instance_or_super('cgs')
def __array_finalize__(self, obj):
for attr in ('_abbrev', '_name', '_value', '_unit_string',
'_uncertainty', '_reference', '_system'):
setattr(self, attr, getattr(obj, attr, None))
self._checked_units = getattr(obj, '_checked_units', False)
class EMConstant(Constant):
"""An electromagnetic constant."""
@property
def cgs(self):
"""Overridden for EMConstant to raise a `TypeError`
emphasizing that there are multiple EM extensions to CGS.
"""
raise TypeError("Cannot convert EM constants to cgs because there "
"are different systems for E.M constants within the "
"c.g.s system (ESU, Gaussian, etc.). Instead, "
"directly use the constant with the appropriate "
"suffix (e.g. e.esu, e.gauss, etc.).")
|
7fea54c7d2e8cdac3eab6643fe9a57b156cd122abc49b7e9924adec572c8e4ee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
from math import acos, sin, cos, sqrt, pi, exp, log, floor
from abc import ABCMeta, abstractmethod
from inspect import signature
import numpy as np
from . import scalar_inv_efuncs
from astropy import constants as const
from astropy import units as u
from astropy.utils import isiterable
from astropy.utils.state import ScienceState
from . import parameters
# Originally authored by Andrew Becker ([email protected]),
# and modified by Neil Crighton ([email protected]) and Roban
# Kramer ([email protected]).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM",
"Flatw0waCDM", "w0waCDM", "wpwaCDM", "w0wzCDM",
"default_cosmology"] + parameters.available
__doctest_requires__ = {'*': ['scipy.integrate', 'scipy.special']}
# Notes about speeding up integrals:
# ---------------------------------
# The supplied cosmology classes use a few tricks to speed
# up distance and time integrals. It is not necessary for
# anyone subclassing FLRW to use these tricks -- but if they
# do, such calculations may be a lot faster.
# The first, more basic, idea is that, in many cases, it pays off to
# provide explicit formulae for inv_efunc rather than simply
# setting up de_density_scale -- assuming there is a nice expression.
# As noted above, almost all of the provided classes do this, and
# that template can pretty much be followed directly with the appropriate
# formula changes.
# The second, and more advanced, option is to also explicitly
# provide a scalar only version of inv_efunc. This results in a fairly
# large speedup (>10x in most cases) in the distance and age integrals,
# even if only done in python, because testing whether the inputs are
# iterable or pure scalars turns out to be rather expensive. To take
# advantage of this, the key thing is to explicitly set the
# instance variables self._inv_efunc_scalar and self._inv_efunc_scalar_args
# in the constructor for the subclass, where the latter are all the
# arguments except z to _inv_efunc_scalar.
#
# The provided classes do use this optimization, and in fact go
# even further and provide optimizations for no radiation, and for radiation
# with massless neutrinos coded in cython. Consult the subclasses for
# details, and scalar_inv_efuncs for the details.
#
# However, the important point is that it is -not- necessary to do this.
# Some conversion constants -- useful to compute them once here
# and reuse in the initialization rather than have every object do them
# Note that the call to cgs is actually extremely expensive,
# so we actually skip using the units package directly, and
# hardwire the conversion from mks to cgs. This assumes that constants
# will always return mks by default -- if this is made faster for simple
# cases like this, it should be changed back.
# Note that the unit tests should catch it if this happens
H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
critdens_const = 3. / (8. * pi * const.G.value * 1000)
arcsec_in_radians = pi / (3600. * 180)
arcmin_in_radians = pi / (60. * 180)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
a_B_c2 = 4e-3 * const.sigma_sb.value / const.c.value ** 3
# Boltzmann constant in eV / K
kB_evK = const.k_B.to(u.eV / u.K)
class CosmologyError(Exception):
pass
class Cosmology:
""" Placeholder for when a more general Cosmology class is
implemented. """
class FLRW(Cosmology, metaclass=ABCMeta):
""" A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you can't instantiate
examples of this class, but must work with one of its
subclasses such as `LambdaCDM` or `wCDM`.
Parameters
----------
H0 : float or scalar `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include
massive neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
Default: 0 [K]. Setting this to zero will turn off both photons
and neutrinos (even massive ones).
Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Notes
-----
Class instances are static -- you can't change the values
of the parameters. That is, all of the attributes above are
read only.
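    Examples
    --------
    Since this class is abstract, work with one of the provided subclasses
    (parameter values below are illustrative)::
        >>> from astropy.cosmology import FlatLambdaCDM
        >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)  # doctest: +SKIP
        >>> cosmo.comoving_distance(1.0)  # doctest: +SKIP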
"""
def __init__(self, H0, Om0, Ode0, Tcmb0=0, Neff=3.04,
m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
# all densities are in units of the critical density
self._Om0 = float(Om0)
if self._Om0 < 0.0:
raise ValueError("Matter density can not be negative")
self._Ode0 = float(Ode0)
if Ob0 is not None:
self._Ob0 = float(Ob0)
if self._Ob0 < 0.0:
raise ValueError("Baryonic density can not be negative")
if self._Ob0 > self._Om0:
raise ValueError("Baryonic density can not be larger than "
"total matter density")
self._Odm0 = self._Om0 - self._Ob0
else:
self._Ob0 = None
self._Odm0 = None
self._Neff = float(Neff)
if self._Neff < 0.0:
raise ValueError("Effective number of neutrinos can "
"not be negative")
self.name = name
# Tcmb may have units
self._Tcmb0 = u.Quantity(Tcmb0, unit=u.K)
if not self._Tcmb0.isscalar:
raise ValueError("Tcmb0 is a non-scalar quantity")
# Hubble parameter at z=0, km/s/Mpc
self._H0 = u.Quantity(H0, unit=u.km / u.s / u.Mpc)
if not self._H0.isscalar:
raise ValueError("H0 is a non-scalar quantity")
# 100 km/s/Mpc * h = H0 (so h is dimensionless)
self._h = self._H0.value / 100.
# Hubble distance
self._hubble_distance = (const.c / self._H0).to(u.Mpc)
# H0 in s^-1; don't use units for speed
H0_s = self._H0.value * H0units_to_invs
# Hubble time; again, avoiding units package for speed
self._hubble_time = u.Quantity(sec_to_Gyr / H0_s, u.Gyr)
# critical density at z=0 (grams per cubic cm)
cd0value = critdens_const * H0_s ** 2
self._critical_density0 = u.Quantity(cd0value, u.g / u.cm ** 3)
        # Load up neutrino masses; floor guards against non-integer Neff
self._nneutrinos = int(floor(self._Neff))
# We are going to share Neff between the neutrinos equally.
# In detail this is not correct, but it is a standard assumption
# because properly calculating it is a) complicated b) depends
# on the details of the massive neutrinos (e.g., their weak
# interactions, which could be unusual if one is considering sterile
# neutrinos)
self._massivenu = False
if self._nneutrinos > 0 and self._Tcmb0.value > 0:
self._neff_per_nu = self._Neff / self._nneutrinos
# We can't use the u.Quantity constructor as we do above
# because it doesn't understand equivalencies
if not isinstance(m_nu, u.Quantity):
raise ValueError("m_nu must be a Quantity")
m_nu = m_nu.to(u.eV, equivalencies=u.mass_energy())
# Now, figure out if we have massive neutrinos to deal with,
# and, if so, get the right number of masses
# It is worth the effort to keep track of massless ones separately
# (since they are quite easy to deal with, and a common use case
# is to set only one neutrino to have mass)
if m_nu.isscalar:
# Assume all neutrinos have the same mass
if m_nu.value == 0:
self._nmasslessnu = self._nneutrinos
self._nmassivenu = 0
else:
self._massivenu = True
self._nmasslessnu = 0
self._nmassivenu = self._nneutrinos
self._massivenu_mass = (m_nu.value *
np.ones(self._nneutrinos))
else:
# Make sure we have the right number of masses
# -unless- they are massless, in which case we cheat a little
if m_nu.value.min() < 0:
raise ValueError("Invalid (negative) neutrino mass"
" encountered")
if m_nu.value.max() == 0:
self._nmasslessnu = self._nneutrinos
self._nmassivenu = 0
else:
self._massivenu = True
if len(m_nu) != self._nneutrinos:
errstr = "Unexpected number of neutrino masses"
raise ValueError(errstr)
# Segregate out the massless ones
self._nmasslessnu = len(np.nonzero(m_nu.value == 0)[0])
self._nmassivenu = self._nneutrinos - self._nmasslessnu
w = np.nonzero(m_nu.value > 0)[0]
self._massivenu_mass = m_nu[w]
# Compute photon density, Tcmb, neutrino parameters
# Tcmb0=0 removes both photons and neutrinos, is handled
# as a special case for efficiency
if self._Tcmb0.value > 0:
# Compute photon density from Tcmb
self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 /\
self._critical_density0.value
# Compute Neutrino temperature
# The constant in front is (4/11)^1/3 -- see any
# cosmology book for an explanation -- for example,
# Weinberg 'Cosmology' p 154 eq (3.1.21)
self._Tnu0 = 0.7137658555036082 * self._Tcmb0
# Compute Neutrino Omega and total relativistic component
# for massive neutrinos. We also store a list version,
# since that is more efficient to do integrals with (perhaps
# surprisingly! But small python lists are more efficient
# than small numpy arrays).
if self._massivenu:
nu_y = self._massivenu_mass / (kB_evK * self._Tnu0)
self._nu_y = nu_y.value
self._nu_y_list = self._nu_y.tolist()
self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
else:
# This case is particularly simple, so do it directly
# The 0.2271... is 7/8 (4/11)^(4/3) -- the temperature
# bit ^4 (blackbody energy density) times 7/8 for
# FD vs. BE statistics.
self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
else:
self._Ogamma0 = 0.0
self._Tnu0 = u.Quantity(0.0, u.K)
self._Onu0 = 0.0
# Compute curvature density
self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
# Subclasses should override this reference if they provide
# more efficient scalar versions of inv_efunc.
self._inv_efunc_scalar = self.inv_efunc
self._inv_efunc_scalar_args = ()
def _namelead(self):
""" Helper function for constructing __repr__"""
if self.name is None:
return "{0}(".format(self.__class__.__name__)
else:
return "{0}(name=\"{1}\", ".format(self.__class__.__name__,
self.name)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, "\
"Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6}, "\
"Ob0={7:s})"
return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0,
self._Tcmb0, self._Neff, self.m_nu,
_float_or_none(self._Ob0))
# Set up a set of properties for H0, Om0, Ode0, Ok0, etc. for user access.
# Note that we don't let these be set (so, obj.Om0 = value fails)
@property
def H0(self):
""" Return the Hubble constant as an `~astropy.units.Quantity` at z=0"""
return self._H0
@property
def Om0(self):
""" Omega matter; matter density/critical density at z=0"""
return self._Om0
@property
def Ode0(self):
""" Omega dark energy; dark energy density/critical density at z=0"""
return self._Ode0
@property
def Ob0(self):
""" Omega baryon; baryonic matter density/critical density at z=0"""
return self._Ob0
@property
def Odm0(self):
""" Omega dark matter; dark matter density/critical density at z=0"""
return self._Odm0
@property
def Ok0(self):
""" Omega curvature; the effective curvature density/critical density
at z=0"""
return self._Ok0
@property
def Tcmb0(self):
""" Temperature of the CMB as `~astropy.units.Quantity` at z=0"""
return self._Tcmb0
@property
def Tnu0(self):
""" Temperature of the neutrino background as `~astropy.units.Quantity` at z=0"""
return self._Tnu0
@property
def Neff(self):
""" Number of effective neutrino species"""
return self._Neff
@property
def has_massive_nu(self):
""" Does this cosmology have at least one massive neutrino species?"""
if self._Tnu0.value == 0:
return False
return self._massivenu
@property
def m_nu(self):
""" Mass of neutrino species"""
if self._Tnu0.value == 0:
return None
if not self._massivenu:
# Only massless
return u.Quantity(np.zeros(self._nmasslessnu), u.eV)
if self._nmasslessnu == 0:
# Only massive
return u.Quantity(self._massivenu_mass, u.eV)
# A mix -- the most complicated case
numass = np.append(np.zeros(self._nmasslessnu),
self._massivenu_mass.value)
return u.Quantity(numass, u.eV)
@property
def h(self):
""" Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]"""
return self._h
@property
def hubble_time(self):
""" Hubble time as `~astropy.units.Quantity`"""
return self._hubble_time
@property
def hubble_distance(self):
""" Hubble distance as `~astropy.units.Quantity`"""
return self._hubble_distance
@property
def critical_density0(self):
""" Critical density as `~astropy.units.Quantity` at z=0"""
return self._critical_density0
@property
def Ogamma0(self):
""" Omega gamma; the density/critical density of photons at z=0"""
return self._Ogamma0
@property
def Onu0(self):
""" Omega nu; the density/critical density of neutrinos at z=0"""
return self._Onu0
def clone(self, **kwargs):
""" Returns a copy of this object, potentially with some changes.
Returns
-------
newcos : Subclass of FLRW
A new instance of this class with the specified changes.
Notes
-----
This assumes that the values of all constructor arguments
are available as properties, which is true of all the provided
subclasses but may not be true of user-provided ones. You can't
change the type of class, so this can't be used to change between
flat and non-flat. If no modifications are requested, then
a reference to this object is returned.
Examples
--------
To make a copy of the Planck13 cosmology with a different Omega_m
and a new name:
>>> from astropy.cosmology import Planck13
>>> newcos = Planck13.clone(name="Modified Planck 2013", Om0=0.35)
"""
# Quick return check, taking advantage of the
# immutability of cosmological objects
if len(kwargs) == 0:
return self
# Get constructor arguments
arglist = signature(self.__init__).parameters.keys()
# Build the dictionary of values used to construct this
# object. This -assumes- every argument to __init__ has a
# property. This is true of all the classes we provide, but
# maybe a user won't do that. So at least try to have a useful
# error message.
argdict = {}
for arg in arglist:
try:
val = getattr(self, arg)
argdict[arg] = val
except AttributeError:
# We didn't find a property -- complain usefully
errstr = "Object did not have property corresponding "\
"to constructor argument '{}'; perhaps it is a "\
"user provided subclass that does not do so"
raise AttributeError(errstr.format(arg))
# Now substitute in new arguments
for newarg in kwargs:
if newarg not in argdict:
errstr = "User provided argument '{}' not found in "\
"constructor for this object"
raise AttributeError(errstr.format(newarg))
argdict[newarg] = kwargs[newarg]
return self.__class__(**argdict)
@abstractmethod
def w(self, z):
""" The dark energy equation of state.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1.
This must be overridden by subclasses.
"""
raise NotImplementedError("w(z) is not implemented")
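    # A minimal sketch of how a subclass might implement ``w`` for a
    # cosmological-constant equation of state (the provided LambdaCDM
    # subclass already covers this case):
    #
    #     def w(self, z):
    #         if isiterable(z):
    #             return -1.0 * np.ones(np.asanyarray(z).shape)
    #         return -1.0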
def Om(self, z):
""" Return the density parameter for non-relativistic matter
at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Om : ndarray, or float if input scalar
The density of non-relativistic matter relative to the critical
density at each redshift.
Notes
-----
This does not include neutrinos, even if non-relativistic
at the redshift of interest; see `Onu`.
"""
if isiterable(z):
z = np.asarray(z)
return self._Om0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2
def Ob(self, z):
""" Return the density parameter for baryonic matter at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Ob : ndarray, or float if input scalar
The density of baryonic matter relative to the critical density at
each redshift.
Raises
------
ValueError
If Ob0 is None.
"""
if self._Ob0 is None:
raise ValueError("Baryon density not set for this cosmology")
if isiterable(z):
z = np.asarray(z)
return self._Ob0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2
def Odm(self, z):
""" Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Odm : ndarray, or float if input scalar
The density of non-relativistic dark matter relative to the critical
density at each redshift.
Raises
------
ValueError
If Ob0 is None.
Notes
-----
This does not include neutrinos, even if non-relativistic
at the redshift of interest.
"""
if self._Odm0 is None:
raise ValueError("Baryonic density not set for this cosmology, "
"unclear meaning of dark matter density")
if isiterable(z):
z = np.asarray(z)
return self._Odm0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
""" Return the equivalent density parameter for curvature
at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Ok : ndarray, or float if input scalar
The equivalent density parameter for curvature at each redshift.
"""
if isiterable(z):
z = np.asarray(z)
# Common enough case to be worth checking explicitly
if self._Ok0 == 0:
return np.zeros(np.asanyarray(z).shape)
else:
if self._Ok0 == 0:
return 0.0
return self._Ok0 * (1. + z) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
""" Return the density parameter for dark energy at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Ode : ndarray, or float if input scalar
            The density of dark energy relative to the critical density at
            each redshift.
"""
if isiterable(z):
z = np.asarray(z)
# Common case worth checking
if self._Ode0 == 0:
return np.zeros(np.asanyarray(z).shape)
else:
if self._Ode0 == 0:
return 0.0
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
""" Return the density parameter for photons at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Ogamma : ndarray, or float if input scalar
The energy density of photons relative to the critical
density at each redshift.
"""
if isiterable(z):
z = np.asarray(z)
return self._Ogamma0 * (1. + z) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
""" Return the density parameter for neutrinos at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Onu : ndarray, or float if input scalar
The energy density of neutrinos relative to the critical
density at each redshift. Note that this includes their
kinetic energy (if they have mass), so it is not equal to
the commonly used :math:`\\sum \\frac{m_{\\nu}}{94 eV}`,
which does not include kinetic energy.
"""
if isiterable(z):
z = np.asarray(z)
if self._Onu0 == 0:
return np.zeros(np.asanyarray(z).shape)
else:
if self._Onu0 == 0:
return 0.0
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
""" Return the CMB temperature at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Tcmb : `~astropy.units.Quantity`
The temperature of the CMB in K.
"""
if isiterable(z):
z = np.asarray(z)
return self._Tcmb0 * (1. + z)
def Tnu(self, z):
""" Return the neutrino temperature at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
Tnu : `~astropy.units.Quantity`
The temperature of the cosmic neutrino background in K.
"""
if isiterable(z):
z = np.asarray(z)
return self._Tnu0 * (1. + z)
def nu_relative_density(self, z):
""" Neutrino density function relative to the energy density in
photons.
Parameters
----------
        z : array-like
Redshift
Returns
-------
f : ndarray, or float if z is scalar
The neutrino density scaling factor relative to the density
in photons at each redshift
Notes
-----
The density in neutrinos is given by
.. math::
\\rho_{\\nu} \\left(a\\right) = 0.2271 \\, N_{eff} \\,
f\\left(m_{\\nu} a / T_{\\nu 0} \\right) \\,
\\rho_{\\gamma} \\left( a \\right)
where
.. math::
f \\left(y\\right) = \\frac{120}{7 \\pi^4}
\\int_0^{\\infty} \\, dx \\frac{x^2 \\sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated
        for each one. Note that f has the limiting value :math:`f(0) = 1`.
This method returns :math:`0.2271 f` using an
analytical fitting formula given in Komatsu et al. 2011, ApJS 192, 18.
"""
# Note that there is also a scalar-z-only cython implementation of
# this in scalar_inv_efuncs.pyx, so if you find a problem in this
# you need to update there too.
# See Komatsu et al. 2011, eq 26 and the surrounding discussion
# for an explanation of what we are doing here.
# However, this is modified to handle multiple neutrino masses
# by computing the above for each mass, then summing
        prefac = 0.22710731766  # 7/8 (4/11)^(4/3) -- see any cosmology book
# The massive and massless contribution must be handled separately
# But check for common cases first
if not self._massivenu:
if np.isscalar(z):
return prefac * self._Neff
else:
return prefac * self._Neff * np.ones(np.asanyarray(z).shape)
# These are purely fitting constants -- see the Komatsu paper
p = 1.83
invp = 0.54644808743 # 1.0 / p
k = 0.3173
z = np.asarray(z)
curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1))
rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu
return prefac * self._neff_per_nu * rel_mass
def _w_integrand(self, ln1pz):
""" Internal convenience function for w(z) integral."""
# See Linder 2003, PRL 90, 91301 eq (5)
# Assumes scalar input, since this should only be called
# inside an integral
z = exp(ln1pz) - 1.0
return 1.0 + self.w(z)
def de_density_scale(self, z):
r""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and is given by
.. math::
I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} }
\left[ 1 + w\left( a^{\prime} \right) \right] \right)
        It will generally be helpful for subclasses to overload this method if
the integral can be done analytically for the particular dark
energy equation of state that they implement.
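        For example, for a cosmological constant (:math:`w = -1`) the
        integrand vanishes and :math:`I = 1` at all redshifts, while a
        constant :math:`w` gives :math:`I = (1 + z)^{3 (1 + w)}`.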
"""
# This allows for an arbitrary w(z) following eq (5) of
# Linder 2003, PRL 90, 91301. The code here evaluates
# the integral numerically. However, most popular
# forms of w(z) are designed to make this integral analytic,
# so it is probably a good idea for subclasses to overload this
# method if an analytic form is available.
#
# The integral we actually use (the one given in Linder)
# is rewritten in terms of z, so looks slightly different than the
# one in the documentation string, but it's the same thing.
from scipy.integrate import quad
if isiterable(z):
z = np.asarray(z)
ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0]
for redshift in z])
return np.exp(3 * ival)
else:
ival = quad(self._w_integrand, 0, log(1 + z))[0]
return exp(3 * ival)
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
It is not necessary to override this method, but if de_density_scale
        takes a particularly simple form, it may be advantageous to do so.
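        For example, with no radiation and a cosmological constant this
        reduces to :math:`E(z) = \\sqrt{\\Omega_{m,0} (1+z)^3 +
        \\Omega_{k,0} (1+z)^2 + \\Omega_{\\Lambda,0}}`.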
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
Ode0 * self.de_density_scale(z))
def inv_efunc(self, z):
"""Inverse of efunc.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The redshift scaling of the inverse Hubble constant.
"""
# Avoid the function overhead by repeating code
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
Ode0 * self.de_density_scale(z))**(-0.5)
def _lookback_time_integrand_scalar(self, z):
""" Integrand of the lookback time.
Parameters
----------
z : float
Input redshift.
Returns
-------
I : float
The integrand for the lookback time
References
----------
Eqn 30 from Hogg 1999.
"""
args = self._inv_efunc_scalar_args
return self._inv_efunc_scalar(z, *args) / (1.0 + z)
def lookback_time_integrand(self, z):
""" Integrand of the lookback time.
Parameters
----------
z : float or array-like
Input redshift.
Returns
-------
I : float or array
The integrand for the lookback time
References
----------
Eqn 30 from Hogg 1999.
"""
if isiterable(z):
zp1 = 1.0 + np.asarray(z)
else:
zp1 = 1. + z
return self.inv_efunc(z) / zp1
def _abs_distance_integrand_scalar(self, z):
""" Integrand of the absorption distance.
Parameters
----------
z : float
Input redshift.
Returns
-------
X : float
The integrand for the absorption distance
References
----------
See Hogg 1999 section 11.
"""
args = self._inv_efunc_scalar_args
return (1.0 + z) ** 2 * self._inv_efunc_scalar(z, *args)
def abs_distance_integrand(self, z):
""" Integrand of the absorption distance.
Parameters
----------
z : float or array
Input redshift.
Returns
-------
X : float or array
The integrand for the absorption distance
References
----------
See Hogg 1999 section 11.
"""
if isiterable(z):
zp1 = 1.0 + np.asarray(z)
else:
zp1 = 1. + z
return zp1 ** 2 * self.inv_efunc(z)
def H(self, z):
""" Hubble parameter (km/s/Mpc) at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
H : `~astropy.units.Quantity`
Hubble parameter at each input redshift.
"""
return self._H0 * self.efunc(z)
def scale_factor(self, z):
""" Scale factor at redshift ``z``.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
a : ndarray, or float if input scalar
Scale factor at each input redshift.
"""
if isiterable(z):
z = np.asarray(z)
return 1. / (1. + z)
def lookback_time(self, z):
""" Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the
Universe now and the age at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar
Returns
-------
t : `~astropy.units.Quantity`
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
"""
return self._lookback_time(z)
def _lookback_time(self, z):
""" Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the
Universe now and the age at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar
Returns
-------
t : `~astropy.units.Quantity`
Lookback time in Gyr to each input redshift.
"""
return self._integral_lookback_time(z)
def _integral_lookback_time(self, z):
""" Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the
Universe now and the age at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar
Returns
-------
t : `~astropy.units.Quantity`
Lookback time in Gyr to each input redshift.
"""
from scipy.integrate import quad
f = lambda red: quad(self._lookback_time_integrand_scalar, 0, red)[0]
return self._hubble_time * vectorize_if_needed(f, z)
def lookback_distance(self, z):
"""
The lookback distance is the light travel time distance to a given
redshift. It is simply c * lookback_time. It may be used to calculate
the proper distance between two redshifts, e.g. for the mean free path
to ionizing radiation.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar
Returns
-------
d : `~astropy.units.Quantity`
Lookback distance in Mpc
"""
return (self.lookback_time(z) * const.c).to(u.Mpc)
def age(self, z):
""" Age of the universe in Gyr at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
t : `~astropy.units.Quantity`
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return self._age(z)
def _age(self, z):
""" Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
t : `~astropy.units.Quantity`
The age of the universe in Gyr at each input redshift.
"""
return self._integral_age(z)
def _integral_age(self, z):
""" Age of the universe in Gyr at redshift ``z``.
Calculated using explicit integration.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
t : `~astropy.units.Quantity`
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
from scipy.integrate import quad
f = lambda red: quad(self._lookback_time_integrand_scalar,
red, np.inf)[0]
return self._hubble_time * vectorize_if_needed(f, z)
def critical_density(self, z):
""" Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
rho : `~astropy.units.Quantity`
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
""" Comoving line-of-sight distance in Mpc at a given
redshift.
The comoving distance along the line-of-sight between two
objects remains constant with time for objects in the Hubble
flow.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Comoving distance in Mpc to each input redshift.
"""
return self._comoving_distance_z1z2(0, z)
def _comoving_distance_z1z2(self, z1, z2):
""" Comoving line-of-sight distance in Mpc between objects at
redshifts z1 and z2.
The comoving distance along the line-of-sight between two
objects remains constant with time for objects in the Hubble
flow.
Parameters
----------
z1, z2 : array-like, shape (N,)
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
def _integral_comoving_distance_z1z2(self, z1, z2):
""" Comoving line-of-sight distance in Mpc between objects at
redshifts z1 and z2.
The comoving distance along the line-of-sight between two
objects remains constant with time for objects in the Hubble
flow.
Parameters
----------
z1, z2 : array-like, shape (N,)
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Comoving distance in Mpc between each input redshift.
"""
from scipy.integrate import quad
f = lambda z1, z2: quad(self._inv_efunc_scalar, z1, z2,
args=self._inv_efunc_scalar_args)[0]
return self._hubble_distance * vectorize_if_needed(f, z1, z2)
def comoving_transverse_distance(self, z):
""" Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is
the same as the comoving distance if omega_k is zero (as in
the current concordance lambda CDM model).
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
        This quantity is also called the 'proper motion distance' in some
texts.
"""
return self._comoving_transverse_distance_z1z2(0, z)
def _comoving_transverse_distance_z1z2(self, z1, z2):
"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift
``z2`` as seen from redshift ``z1`` corresponding to an
angular separation of 1 radian. This is the same as the
comoving distance if omega_k is zero (as in the current
concordance lambda CDM model).
Parameters
----------
z1, z2 : array-like, shape (N,)
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Comoving transverse distance in Mpc between input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in
some texts.
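        For :math:`\\Omega_{k,0} > 0` this evaluates (Hogg 1999, eq. 16)
        .. math::
            d_M = \\frac{d_H}{\\sqrt{\\Omega_{k,0}}}
                  \\sinh\\left(\\sqrt{\\Omega_{k,0}} \\, d_C / d_H\\right),
        with :math:`\\sin` replacing :math:`\\sinh` when
        :math:`\\Omega_{k,0} < 0`.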
"""
Ok0 = self._Ok0
dc = self._comoving_distance_z1z2(z1, z2)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self._hubble_distance
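        # Ok0 > 0 is an open universe (sinh scaling); Ok0 < 0 is a
        # closed universe (sin scaling).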
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value)
def angular_diameter_distance(self, z):
""" Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift ``z``.
Weinberg, 1972, pp 421-424; Weedman, 1986, pp 65-67; Peebles,
1993, pp 325-327.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Angular diameter distance in Mpc at each input redshift.
"""
if isiterable(z):
z = np.asarray(z)
return self.comoving_transverse_distance(z) / (1. + z)
def luminosity_distance(self, z):
""" Luminosity distance in Mpc at redshift ``z``.
This is the distance to use when converting between the
bolometric flux from an object at redshift ``z`` and its
bolometric luminosity.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Luminosity distance in Mpc at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a luminosity distance.
References
----------
Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
"""
if isiterable(z):
z = np.asarray(z)
return (1. + z) * self.comoving_transverse_distance(z)
def angular_diameter_distance_z1z2(self, z1, z2):
""" Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing.
Parameters
----------
z1, z2 : array-like, shape (N,)
            Input redshifts. z2 must be larger than z1.
Returns
-------
d : `~astropy.units.Quantity`, shape (N,) or single if input scalar
The angular diameter distance between each input redshift
pair.
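        Examples
        --------
        A sketch of a typical lensing-style call (redshift values are
        illustrative):
        >>> from astropy.cosmology import Planck13
        >>> d_ls = Planck13.angular_diameter_distance_z1z2(0.5, 2.0)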
"""
z1 = np.asanyarray(z1)
z2 = np.asanyarray(z2)
return self._comoving_transverse_distance_z1z2(z1, z2) / (1. + z2)
def absorption_distance(self, z):
""" Absorption distance at redshift ``z``.
This is used to calculate the number of objects with some
cross section of absorption and number density intersecting a
sightline per unit redshift path.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
d : float or ndarray
Absorption distance (dimensionless) at each input redshift.
References
----------
Hogg 1999 Section 11. (astro-ph/9905116)
Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
"""
from scipy.integrate import quad
f = lambda red: quad(self._abs_distance_integrand_scalar, 0, red)[0]
return vectorize_if_needed(f, z)
def distmod(self, z):
""" Distance modulus at redshift ``z``.
The distance modulus is defined as the (apparent magnitude -
absolute magnitude) for an object at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
distmod : `~astropy.units.Quantity`
            Distance modulus at each input redshift, in magnitudes.
See Also
--------
z_at_value : Find the redshift corresponding to a distance modulus.
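        Examples
        --------
        A minimal usage sketch (parameter values are illustrative):
        >>> from astropy.cosmology import FlatLambdaCDM
        >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
        >>> mu = cosmo.distmod(0.5)  # Quantity in mag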
"""
# Remember that the luminosity distance is in Mpc
# Abs is necessary because in certain obscure closed cosmologies
# the distance modulus can be negative -- which is okay because
# it enters as the square.
val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
return u.Quantity(val, u.mag)
def comoving_volume(self, z):
""" Comoving volume in cubic Mpc at redshift ``z``.
This is the volume of the universe encompassed by redshifts less
than ``z``. For the case of omega_k = 0 it is a sphere of radius
`comoving_distance` but it is less intuitive
if omega_k is not 0.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
V : `~astropy.units.Quantity`
Comoving volume in :math:`Mpc^3` at each input redshift.
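        Examples
        --------
        A minimal usage sketch (parameter values are illustrative):
        >>> from astropy.cosmology import FlatLambdaCDM
        >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
        >>> V = cosmo.comoving_volume(0.5)  # Quantity in Mpc^3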
"""
Ok0 = self._Ok0
if Ok0 == 0:
return 4. / 3. * pi * self.comoving_distance(z) ** 3
dh = self._hubble_distance.value # .value for speed
dm = self.comoving_transverse_distance(z).value
term1 = 4. * pi * dh ** 3 / (2. * Ok0) * u.Mpc ** 3
term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
term3 = sqrt(abs(Ok0)) * dm / dh
if Ok0 > 0:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3))
else:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3))
def differential_comoving_volume(self, z):
"""Differential comoving volume at redshift z.
Useful for calculating the effective comoving volume.
For example, allows for integration over a comoving volume
that has a sensitivity function that changes with redshift.
The total comoving volume is given by integrating
differential_comoving_volume to redshift z
and multiplying by a solid angle.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
dV : `~astropy.units.Quantity`
Differential comoving volume per redshift per steradian at
each input redshift."""
dh = self._hubble_distance
da = self.angular_diameter_distance(z)
zp1 = 1.0 + z
return dh * ((zp1 * da) ** 2.0) / u.Quantity(self.efunc(z),
u.steradian)
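    # Illustrative cross-check of the relation described in the
    # docstring above (a sketch, not part of the public API): for any
    # FLRW instance ``cosmo``, the all-sky integral of the differential
    # comoving volume should reproduce the comoving volume, e.g.
    #
    #     >>> from scipy.integrate import quad
    #     >>> f = lambda z: cosmo.differential_comoving_volume(z).value
    #     >>> V = 4 * np.pi * quad(f, 0, 0.5)[0]
    #     >>> # V ~ cosmo.comoving_volume(0.5).value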
def kpc_comoving_per_arcmin(self, z):
""" Separation in transverse comoving kpc corresponding to an
arcminute at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return (self.comoving_transverse_distance(z).to(u.kpc) *
arcmin_in_radians / u.arcmin)
def kpc_proper_per_arcmin(self, z):
""" Separation in transverse proper kpc corresponding to an
arcminute at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
The distance in proper kpc corresponding to an arcmin at each
input redshift.
"""
return (self.angular_diameter_distance(z).to(u.kpc) *
arcmin_in_radians / u.arcmin)
def arcsec_per_kpc_comoving(self, z):
""" Angular separation in arcsec corresponding to a comoving kpc
at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
theta : `~astropy.units.Quantity`
The angular separation in arcsec corresponding to a comoving kpc
at each input redshift.
"""
return u.arcsec / (self.comoving_transverse_distance(z).to(u.kpc) *
arcsec_in_radians)
def arcsec_per_kpc_proper(self, z):
""" Angular separation in arcsec corresponding to a proper kpc at
redshift ``z``.
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar.
Returns
-------
theta : `~astropy.units.Quantity`
The angular separation in arcsec corresponding to a proper kpc
at each input redshift.
"""
return u.arcsec / (self.angular_diameter_distance(z).to(u.kpc) *
arcsec_in_radians)
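    # Illustrative use of the four angular-scale helpers above (the
    # redshift is an arbitrary example value):
    #
    #     >>> from astropy.cosmology import Planck13
    #     >>> s1 = Planck13.kpc_proper_per_arcmin(0.5)   # kpc / arcmin
    #     >>> s2 = Planck13.arcsec_per_kpc_proper(0.5)   # arcsec / kpc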
class LambdaCDM(FLRW):
"""FLRW cosmology with a cosmological constant and curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of the cosmological constant in units of
the critical density at z=0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import LambdaCDM
>>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, Tcmb0=0, Neff=3.04,
m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
Ob0=Ob0)
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0)
if self._Ok0 == 0:
self._optimize_flat_norad()
else:
self._comoving_distance_z1z2 = \
self._elliptic_comoving_distance_z1z2
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list)
def _optimize_flat_norad(self):
"""Set optimizations for flat LCDM cosmologies with no radiation.
"""
        # Call out the Om0=0 (de Sitter) and Om0=1 (Einstein-de Sitter) cases.
# The dS case is required because the hypergeometric case
# for Omega_M=0 would lead to an infinity in its argument.
# The EdS case is three times faster than the hypergeometric.
if self._Om0 == 0:
self._comoving_distance_z1z2 = \
self._dS_comoving_distance_z1z2
self._age = self._dS_age
self._lookback_time = self._dS_lookback_time
elif self._Om0 == 1:
self._comoving_distance_z1z2 = \
self._EdS_comoving_distance_z1z2
self._age = self._EdS_age
self._lookback_time = self._EdS_lookback_time
else:
self._comoving_distance_z1z2 = \
self._hypergeometric_comoving_distance_z1z2
self._age = self._flat_age
self._lookback_time = self._flat_lookback_time
def w(self, z):
"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
        -----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is
:math:`w(z) = -1`.
"""
if np.isscalar(z):
return -1.0
else:
return -1.0 * np.ones(np.asanyarray(z).shape)
def de_density_scale(self, z):
""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
and in this case is given by :math:`I = 1`.
"""
if np.isscalar(z):
return 1.
else:
return np.ones(np.asanyarray(z).shape)
def _elliptic_comoving_distance_z1z2(self, z1, z2):
""" Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is
the same as the comoving distance if omega_k is zero.
For Omega_rad = 0 the comoving distance can be directly calculated
as an elliptic integral.
Equation here taken from
Kantowski, Kao, and Thomas, arXiv:0002334
Not valid or appropriate for flat cosmologies (Ok0=0).
Parameters
----------
z1, z2 : array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity`
Comoving distance in Mpc between each input redshift.
"""
from scipy.special import ellipkinc
if isiterable(z1):
z1 = np.asarray(z1)
if isiterable(z2):
z2 = np.asarray(z2)
if isiterable(z1) and isiterable(z2):
if z1.shape != z2.shape:
msg = "z1 and z2 have different shapes"
raise ValueError(msg)
# The analytic solution is not valid for any of Om0, Ode0, Ok0 == 0.
# Use the explicit integral solution for these cases.
if self._Om0 == 0 or self._Ode0 == 0 or self._Ok0 == 0:
return self._integral_comoving_distance_z1z2(z1, z2)
b = -(27. / 2) * self._Om0**2 * self._Ode0 / self._Ok0**3
kappa = b / abs(b)
if (b < 0) or (2 < b):
def phi_z(Om0, Ok0, kappa, y1, A, z):
return np.arccos(((1 + z) * Om0 / abs(Ok0) + kappa * y1 - A) /
((1 + z) * Om0 / abs(Ok0) + kappa * y1 + A))
v_k = pow(kappa * (b - 1) + sqrt(b * (b - 2)), 1. / 3)
y1 = (-1 + kappa * (v_k + 1 / v_k)) / 3
A = sqrt(y1 * (3 * y1 + 2))
g = 1 / sqrt(A)
k2 = (2 * A + kappa * (1 + 3 * y1)) / (4 * A)
phi_z1 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z1)
phi_z2 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z2)
# Get lower-right 0<b<2 solution in Om0, Ode0 plane.
        # For the upper-left 0<b<2 solution the Big Bang didn't happen.
elif (0 < b) and (b < 2) and self._Om0 > self._Ode0:
def phi_z(Om0, Ok0, y1, y2, z):
return np.arcsin(np.sqrt((y1 - y2) /
((1 + z) * Om0 / abs(Ok0) + y1)))
yb = cos(acos(1 - b) / 3)
yc = sqrt(3) * sin(acos(1 - b) / 3)
y1 = (1. / 3) * (-1 + yb + yc)
y2 = (1. / 3) * (-1 - 2 * yb)
y3 = (1. / 3) * (-1 + yb - yc)
g = 2 / sqrt(y1 - y2)
k2 = (y1 - y3) / (y1 - y2)
phi_z1 = phi_z(self._Om0, self._Ok0, y1, y2, z1)
phi_z2 = phi_z(self._Om0, self._Ok0, y1, y2, z2)
else:
return self._integral_comoving_distance_z1z2(z1, z2)
prefactor = self._hubble_distance / sqrt(abs(self._Ok0))
return prefactor * g * (ellipkinc(phi_z1, k2) - ellipkinc(phi_z2, k2))
def _dS_comoving_distance_z1z2(self, z1, z2):
""" Comoving line-of-sight distance in Mpc between objects at redshifts
z1 and z2 in a flat, Omega_Lambda=1 cosmology (de Sitter).
The comoving distance along the line-of-sight between two
objects remains constant with time for objects in the Hubble
flow.
The de Sitter case has an analytic solution.
Parameters
----------
z1, z2 : array-like, shape (N,)
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Comoving distance in Mpc between each input redshift.
"""
if isiterable(z1):
z1 = np.asarray(z1)
z2 = np.asarray(z2)
if z1.shape != z2.shape:
msg = "z1 and z2 have different shapes"
raise ValueError(msg)
return self._hubble_distance * (z2 - z1)
def _EdS_comoving_distance_z1z2(self, z1, z2):
""" Comoving line-of-sight distance in Mpc between objects at redshifts
        z1 and z2 in a flat, Omega_M=1 cosmology (Einstein-de Sitter).
        The comoving distance along the line-of-sight between two
        objects remains constant with time for objects in the Hubble
        flow.
        For Omega_M=1, Omega_rad=0 the comoving distance has an analytic
        solution.
Parameters
----------
z1, z2 : array-like, shape (N,)
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity`
Comoving distance in Mpc between each input redshift.
"""
if isiterable(z1):
z1 = np.asarray(z1)
z2 = np.asarray(z2)
if z1.shape != z2.shape:
msg = "z1 and z2 have different shapes"
raise ValueError(msg)
prefactor = 2 * self._hubble_distance
return prefactor * ((1+z1)**(-1./2) - (1+z2)**(-1./2))
def _hypergeometric_comoving_distance_z1z2(self, z1, z2):
""" Comoving line-of-sight distance in Mpc between objects at
redshifts z1 and z2.
The comoving distance along the line-of-sight between two
objects remains constant with time for objects in the Hubble
flow.
For Omega_radiation = 0 the comoving distance can be directly calculated
as a hypergeometric function.
Equation here taken from
Baes, Camps, Van De Putte, 2017, MNRAS, 468, 927.
Parameters
----------
z1, z2 : array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity`
Comoving distance in Mpc between each input redshift.
"""
if isiterable(z1):
z1 = np.asarray(z1)
z2 = np.asarray(z2)
if z1.shape != z2.shape:
msg = "z1 and z2 have different shapes"
raise ValueError(msg)
s = ((1 - self._Om0) / self._Om0) ** (1./3)
# Use np.sqrt here to handle negative s (Om0>1).
prefactor = self._hubble_distance / np.sqrt(s * self._Om0)
return prefactor * (self._T_hypergeometric(s / (1 + z1)) -
self._T_hypergeometric(s / (1 + z2)))
def _T_hypergeometric(self, x):
""" Compute T_hypergeometric(x) using Gauss Hypergeometric function 2F1
T(x) = 2 \\sqrt(x) _{2}F_{1} \\left(\\frac{1}{6}, \\frac{1}{2}; \\frac{7}{6}; -x^3)
Note:
The scipy.special.hyp2f1 code already implements the hypergeometric
transformation suggested by
Baes, Camps, Van De Putte, 2017, MNRAS, 468, 927.
for use in actual numerical evaulations.
"""
from scipy.special import hyp2f1
return 2 * np.sqrt(x) * hyp2f1(1./6, 1./2, 7./6, -x**3)
def _dS_age(self, z):
""" Age of the universe in Gyr at redshift ``z``.
The age of a de Sitter Universe is infinite.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
t : `~astropy.units.Quantity`
The age of the universe in Gyr at each input redshift.
"""
return self._hubble_time * inf_like(z)
def _EdS_age(self, z):
""" Age of the universe in Gyr at redshift ``z``.
        For Omega_radiation = 0 (T_CMB = 0; massless neutrinos)
        the age has a simple closed-form analytic solution.
See, e.g.,
Thomas and Kantowski, arXiv:0003463
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
t : `~astropy.units.Quantity`
The age of the universe in Gyr at each input redshift.
"""
if isiterable(z):
z = np.asarray(z)
return (2./3) * self._hubble_time * (1+z)**(-3./2)
def _flat_age(self, z):
""" Age of the universe in Gyr at redshift ``z``.
        For Omega_radiation = 0 (T_CMB = 0; massless neutrinos)
        the age can be calculated analytically in closed form.
See, e.g.,
Thomas and Kantowski, arXiv:0003463
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
t : `~astropy.units.Quantity`
The age of the universe in Gyr at each input redshift.
"""
if isiterable(z):
z = np.asarray(z)
        # Use np.lib.scimath.sqrt and np.arcsinh instead of math.sqrt and
        # math.asinh to properly handle the complex values that arise
        # when 1 - Om0 < 0.
prefactor = (2./3) * self._hubble_time / \
np.lib.scimath.sqrt(1 - self._Om0)
arg = np.arcsinh(np.lib.scimath.sqrt((1 / self._Om0 - 1 + 0j) /
(1 + z)**3))
return (prefactor * arg).real
def _EdS_lookback_time(self, z):
""" Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the
Universe now and the age at redshift ``z``.
        For Omega_radiation = 0 (T_CMB = 0; massless neutrinos)
        the age has an analytic solution.
        The lookback time is here calculated as age(0) - age(z).
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar
Returns
-------
t : `~astropy.units.Quantity`
Lookback time in Gyr to each input redshift.
"""
return self._EdS_age(0) - self._EdS_age(z)
def _dS_lookback_time(self, z):
""" Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the
Universe now and the age at redshift ``z``.
For Omega_radiation = 0 (T_CMB = 0; massless neutrinos)
the age can be directly calculated.
a = exp(H * t) where t=0 at z=0
t = (1/H) (ln 1 - ln a) = (1/H) (0 - ln (1/(1+z))) = (1/H) ln(1+z)
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
t : `~astropy.units.Quantity`
Lookback time in Gyr to each input redshift.
"""
if isiterable(z):
z = np.asarray(z)
return self._hubble_time * np.log(1+z)
def _flat_lookback_time(self, z):
""" Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the
Universe now and the age at redshift ``z``.
For Omega_radiation = 0 (T_CMB = 0; massless neutrinos)
the age can be directly calculated.
        The lookback time is here calculated as age(0) - age(z).
Parameters
----------
z : array-like
Input redshifts. Must be 1D or scalar
Returns
-------
t : `~astropy.units.Quantity`
Lookback time in Gyr to each input redshift.
"""
return self._flat_age(0) - self._flat_age(z)
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
"""
if isiterable(z):
z = np.asarray(z)
# We override this because it takes a particularly simple
# form for a cosmological constant
Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
if self._massivenu:
Or = self._Ogamma0 * (1. + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0)
def inv_efunc(self, z):
r""" Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The inverse redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H_z = H_0 /
E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0)**(-0.5)
class FlatLambdaCDM(LambdaCDM):
"""FLRW cosmology with a cosmological constant and no curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Tcmb0=0, Neff=3.04,
m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
LambdaCDM.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name,
Ob0=Ob0)
# Do some twiddling after the fact to get flatness
self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0
self._Ok0 = 0.0
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0)
# Repeat the optimization reassignments here because the init
            # of the LambdaCDM above didn't actually create a flat cosmology.
# That was done through the explicit tweak setting self._Ok0.
self._optimize_flat_norad()
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list)
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
"""
if isiterable(z):
z = np.asarray(z)
# We override this because it takes a particularly simple
# form for a cosmological constant
Om0, Ode0 = self._Om0, self._Ode0
if self._massivenu:
Or = self._Ogamma0 * (1 + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The inverse redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H_z = H_0 / E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0 = self._Om0, self._Ode0
if self._massivenu:
Or = self._Ogamma0 * (1. + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return (zp1 ** 3 * (Or * zp1 + Om0) + Ode0)**(-0.5)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Tcmb0={3:.4g}, "\
"Neff={4:.3g}, m_nu={5}, Ob0={6:s})"
return retstr.format(self._namelead(), self._H0, self._Om0,
self._Tcmb0, self._Neff, self.m_nu,
_float_or_none(self._Ob0))
class wCDM(FLRW):
"""FLRW cosmology with a constant dark energy equation of state
and curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import wCDM
>>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, w0=-1., Tcmb0=0,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
Ob0=Ob0)
self._w0 = float(w0)
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0)
@property
def w0(self):
""" Dark energy equation of state"""
return self._w0
def w(self, z):
"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
        -----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is
:math:`w(z) = w_0`.
"""
if np.isscalar(z):
return self._w0
else:
return self._w0 * np.ones(np.asanyarray(z).shape)
def de_density_scale(self, z):
""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
and in this case is given by
:math:`I = \\left(1 + z\\right)^{3\\left(1 + w_0\\right)}`
"""
if isiterable(z):
z = np.asarray(z)
return (1. + z) ** (3. * (1. + self._w0))
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0
if self._massivenu:
Or = self._Ogamma0 * (1. + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
Ode0 * zp1 ** (3. * (1. + w0)))
def inv_efunc(self, z):
r""" Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The inverse redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H_z = H_0 / E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0
if self._massivenu:
Or = self._Ogamma0 * (1. + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1.0 + z
return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) +
Ode0 * zp1 ** (3. * (1. + w0)))**(-0.5)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, w0={4:.3g}, "\
"Tcmb0={5:.4g}, Neff={6:.3g}, m_nu={7}, Ob0={8:s})"
return retstr.format(self._namelead(), self._H0, self._Om0,
self._Ode0, self._w0, self._Tcmb0, self._Neff,
self.m_nu, _float_or_none(self._Ob0))
class FlatwCDM(wCDM):
"""FLRW cosmology with a constant dark energy equation of state
and no spatial curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import FlatwCDM
>>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, w0=-1., Tcmb0=0,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
wCDM.__init__(self, H0, Om0, 0.0, w0, Tcmb0, Neff, m_nu,
name=name, Ob0=Ob0)
# Do some twiddling after the fact to get flatness
self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0
self._Ok0 = 0.0
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._w0)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0)
def efunc(self, z):
""" Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H(z) = H_0 E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0
if self._massivenu:
Or = self._Ogamma0 * (1. + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1. + z
return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) +
Ode0 * zp1 ** (3. * (1 + w0)))
def inv_efunc(self, z):
r""" Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
E : ndarray, or float if input scalar
The inverse redshift scaling of the Hubble constant.
Notes
-----
The return value, E, is defined such that :math:`H_z = H_0 / E`.
"""
if isiterable(z):
z = np.asarray(z)
Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0
if self._massivenu:
Or = self._Ogamma0 * (1. + self.nu_relative_density(z))
else:
Or = self._Ogamma0 + self._Onu0
zp1 = 1. + z
return (zp1 ** 3 * (Or * zp1 + Om0) +
Ode0 * zp1 ** (3. * (1. + w0)))**(-0.5)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, w0={3:.3g}, Tcmb0={4:.4g}, "\
"Neff={5:.3g}, m_nu={6}, Ob0={7:s})"
return retstr.format(self._namelead(), self._H0, self._Om0, self._w0,
self._Tcmb0, self._Neff, self.m_nu,
_float_or_none(self._Ob0))
class w0waCDM(FLRW):
"""FLRW cosmology with a CPL dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the
CPL form as described in Chevallier & Polarski Int. J. Mod. Phys.
D10, 213 (2001) and Linder PRL 90, 91301 (2003):
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import w0waCDM
>>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, w0=-1., wa=0., Tcmb0=0,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
Ob0=Ob0)
self._w0 = float(w0)
self._wa = float(wa)
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wa)
@property
def w0(self):
""" Dark energy equation of state at z=0"""
return self._w0
@property
def wa(self):
""" Negative derivative of dark energy equation of state w.r.t. a"""
return self._wa
def w(self, z):
"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
        -----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is
:math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \\frac{z}{1+z}`.
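        For example (with illustrative values w0=-0.9, wa=0.2), at z=1
        this gives w(1) = -0.9 + 0.2 * (1/2) = -0.8.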
"""
if isiterable(z):
z = np.asarray(z)
return self._w0 + self._wa * z / (1.0 + z)
def de_density_scale(self, z):
r""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)}
\exp \left(-3 w_a \frac{z}{1+z}\right)
"""
if isiterable(z):
z = np.asarray(z)
zp1 = 1.0 + z
return zp1 ** (3 * (1 + self._w0 + self._wa)) * \
np.exp(-3 * self._wa * z / zp1)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\
"Ode0={3:.3g}, w0={4:.3g}, wa={5:.3g}, Tcmb0={6:.4g}, "\
"Neff={7:.3g}, m_nu={8}, Ob0={9:s})"
return retstr.format(self._namelead(), self._H0, self._Om0,
self._Ode0, self._w0, self._wa,
self._Tcmb0, self._Neff, self.m_nu,
_float_or_none(self._Ob0))
class Flatw0waCDM(w0waCDM):
"""FLRW cosmology with a CPL dark energy equation of state and no
curvature.
The equation for the dark energy equation of state uses the
CPL form as described in Chevallier & Polarski Int. J. Mod. Phys.
D10, 213 (2001) and Linder PRL 90, 91301 (2003):
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import Flatw0waCDM
>>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, w0=-1., wa=0., Tcmb0=0,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
w0waCDM.__init__(self, H0, Om0, 0.0, w0=w0, wa=wa, Tcmb0=Tcmb0,
Neff=Neff, m_nu=m_nu, name=name, Ob0=Ob0)
# Do some twiddling after the fact to get flatness
self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0
self._Ok0 = 0.0
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._w0, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wa)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\
"w0={3:.3g}, Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6}, "\
"Ob0={7:s})"
return retstr.format(self._namelead(), self._H0, self._Om0, self._w0,
self._Tcmb0, self._Neff, self.m_nu,
_float_or_none(self._Ob0))
class wpwaCDM(FLRW):
"""FLRW cosmology with a CPL dark energy equation of state, a pivot
redshift, and curvature.
The equation for the dark energy equation of state uses the
CPL form as described in Chevallier & Polarski Int. J. Mod. Phys.
D10, 213 (2001) and Linder PRL 90, 91301 (2003), but modified
to have a pivot redshift as in the findings of the Dark Energy
Task Force (Albrecht et al. arXiv:0901.0721 (2009)):
:math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a( 1/(1+zp) - 1/(1+z) )`.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
wp : float, optional
Dark energy equation of state at the pivot redshift zp. This is
pressure/density for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0.
zp : float, optional
Pivot redshift -- the redshift where w(z) = wp
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import wpwaCDM
>>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, wp=-1., wa=0., zp=0,
Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV),
Ob0=None, name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
Ob0=Ob0)
self._wp = float(wp)
self._wa = float(wa)
self._zp = float(zp)
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
apiv = 1.0 / (1.0 + self._zp)
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._wp, apiv, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._wp, apiv, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._wp,
apiv, self._wa)
@property
def wp(self):
""" Dark energy equation of state at the pivot redshift zp"""
return self._wp
@property
def wa(self):
""" Negative derivative of dark energy equation of state w.r.t. a"""
return self._wa
@property
def zp(self):
""" The pivot redshift, where w(z) = wp"""
return self._zp
def w(self, z):
"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
        -----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is
        :math:`w(z) = w_p + w_a (a_p - a)` where :math:`a = 1/(1+z)`
        and :math:`a_p = 1/(1 + z_p)`.
"""
if isiterable(z):
z = np.asarray(z)
apiv = 1.0 / (1.0 + self._zp)
return self._wp + self._wa * (apiv - 1.0 / (1. + z))
def de_density_scale(self, z):
r""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
a_p = \frac{1}{1 + z_p}
I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)}
\exp \left(-3 w_a \frac{z}{1+z}\right)
"""
if isiterable(z):
z = np.asarray(z)
zp1 = 1. + z
apiv = 1. / (1. + self._zp)
return zp1 ** (3. * (1. + self._wp + apiv * self._wa)) * \
np.exp(-3. * self._wa * z / zp1)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, wp={4:.3g}, "\
"wa={5:.3g}, zp={6:.3g}, Tcmb0={7:.4g}, Neff={8:.3g}, "\
"m_nu={9}, Ob0={10:s})"
return retstr.format(self._namelead(), self._H0, self._Om0,
self._Ode0, self._wp, self._wa, self._zp,
self._Tcmb0, self._Neff, self.m_nu,
_float_or_none(self._Ob0))
class w0wzCDM(FLRW):
"""FLRW cosmology with a variable dark energy equation of state
and curvature.
The equation for the dark energy equation of state uses the
simple form: :math:`w(z) = w_0 + w_z z`.
This form is not recommended for z > 1.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0. This is pressure/density for
dark energy in units where c=1.
wz : float, optional
Derivative of the dark energy equation of state with respect to z.
A cosmological constant has w0=-1.0 and wz=0.0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
>>> from astropy.cosmology import w0wzCDM
>>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, w0=-1., wz=0., Tcmb0=0,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None,
name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
Ob0=Ob0)
self._w0 = float(w0)
self._wz = float(wz)
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0, self._wz)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0, self._wz)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wz)
@property
def w0(self):
""" Dark energy equation of state at z=0"""
return self._w0
@property
def wz(self):
""" Derivative of the dark energy equation of state w.r.t. z"""
return self._wz
def w(self, z):
"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
        -----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is given by
:math:`w(z) = w_0 + w_z z`.
"""
if isiterable(z):
z = np.asarray(z)
return self._w0 + self._wz * z
def de_density_scale(self, z):
r""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
\exp \left(-3 w_z z\right)
"""
if isiterable(z):
z = np.asarray(z)
zp1 = 1. + z
return zp1 ** (3. * (1. + self._w0 - self._wz)) *\
np.exp(-3. * self._wz * z)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\
"Ode0={3:.3g}, w0={4:.3g}, wz={5:.3g} Tcmb0={6:.4g}, "\
"Neff={7:.3g}, m_nu={8}, Ob0={9:s})"
return retstr.format(self._namelead(), self._H0, self._Om0,
self._Ode0, self._w0, self._wz, self._Tcmb0,
self._Neff, self.m_nu, _float_or_none(self._Ob0))
def _float_or_none(x, digits=3):
""" Helper function to format a variable that can be a float or None"""
if x is None:
return str(x)
fmtstr = "{0:.{digits}g}".format(x, digits=digits)
return fmtstr.format(x)
def vectorize_if_needed(func, *x):
""" Helper function to vectorize functions on array inputs"""
if any(map(isiterable, x)):
return np.vectorize(func)(*x)
else:
return func(*x)
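# A usage sketch for the helper above (illustrative; the expected
# results are shown as comments, not executed here):
#
#     >>> f = lambda x: x ** 2
#     >>> vectorize_if_needed(f, 3.0)         # scalar in -> 9.0
#     >>> vectorize_if_needed(f, [1.0, 2.0])  # iterable in -> array([1., 4.])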
def inf_like(x):
"""Return the shape of x with value infinity and dtype='float'.
Preserves 'shape' for both array and scalar inputs.
But always returns a float array, even if x is of integer type.
>>> inf_like(0.) # float scalar
inf
>>> inf_like(1) # integer scalar should give float output
inf
>>> inf_like([0., 1., 2., 3.]) # float list
array([inf, inf, inf, inf])
>>> inf_like([0, 1, 2, 3]) # integer list should give float output
array([inf, inf, inf, inf])
"""
if np.isscalar(x):
return np.inf
else:
return np.full_like(x, np.inf, dtype='float')
# Pre-defined cosmologies. This loops over the parameter sets in the
# parameters module and creates a LambdaCDM or FlatLambdaCDM instance
# with the same name as the parameter set in the current module's namespace.
# Note this assumes all the cosmologies in parameters are LambdaCDM,
# which is true at least as of this writing.
for key in parameters.available:
par = getattr(parameters, key)
if par['flat']:
cosmo = FlatLambdaCDM(par['H0'], par['Om0'], Tcmb0=par['Tcmb0'],
Neff=par['Neff'],
m_nu=u.Quantity(par['m_nu'], u.eV),
name=key,
Ob0=par['Ob0'])
docstr = "{} instance of FlatLambdaCDM cosmology\n\n(from {})"
cosmo.__doc__ = docstr.format(key, par['reference'])
else:
cosmo = LambdaCDM(par['H0'], par['Om0'], par['Ode0'],
Tcmb0=par['Tcmb0'], Neff=par['Neff'],
m_nu=u.Quantity(par['m_nu'], u.eV), name=key,
Ob0=par['Ob0'])
docstr = "{} instance of LambdaCDM cosmology\n\n(from {})"
cosmo.__doc__ = docstr.format(key, par['reference'])
setattr(sys.modules[__name__], key, cosmo)
# don't leave these variables floating around in the namespace
del key, par, cosmo
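# After the loop above, the predefined cosmologies are available as
# module attributes, e.g. (illustrative):
#
#     >>> from astropy.cosmology import Planck15, WMAP9
#     >>> H0 = Planck15.H0  # Hubble constant of the Planck 2015 set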
#########################################################################
# The science state below contains the current cosmology.
#########################################################################
class default_cosmology(ScienceState):
"""
The default cosmology to use. To change it::
>>> from astropy.cosmology import default_cosmology, WMAP7
    >>> with default_cosmology.set(WMAP7):
    ...     # WMAP7 cosmology in effect
    ...     pass
Or, you may use a string::
    >>> with default_cosmology.set('WMAP7'):
    ...     # WMAP7 cosmology in effect
    ...     pass
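    To retrieve the current default::
    >>> cosmo = default_cosmology.get()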
"""
_value = 'Planck15'
@staticmethod
def get_cosmology_from_string(arg):
""" Return a cosmology instance from a string.
"""
if arg == 'no_default':
cosmo = None
else:
try:
cosmo = getattr(sys.modules[__name__], arg)
except AttributeError:
s = "Unknown cosmology '{}'. Valid cosmologies:\n{}".format(
arg, parameters.available)
raise ValueError(s)
return cosmo
@classmethod
def validate(cls, value):
if value is None:
value = 'Planck15'
if isinstance(value, str):
return cls.get_cosmology_from_string(value)
elif isinstance(value, Cosmology):
return value
else:
raise TypeError("default_cosmology must be a string or Cosmology instance.")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Convenience functions for `astropy.cosmology`.
"""
import warnings
import numpy as np
from .core import CosmologyError
from astropy.units import Quantity
__all__ = ['z_at_value']
__doctest_requires__ = {'*': ['scipy.integrate']}
def z_at_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500):
""" Find the redshift ``z`` at which ``func(z) = fval``.
This finds the redshift at which one of the cosmology functions or
methods (for example Planck13.distmod) is equal to a known value.
.. warning::
Make sure you understand the behavior of the function that you
are trying to invert! Depending on the cosmology, there may not
be a unique solution. For example, in the standard Lambda CDM
cosmology, there are two redshifts which give an angular
diameter distance of 1500 Mpc, z ~ 0.7 and z ~ 3.8. To force
``z_at_value`` to find the solution you are interested in, use the
``zmin`` and ``zmax`` keywords to limit the search range (see the
example below).
Parameters
----------
func : function or method
A function that takes a redshift as input.
fval : astropy.Quantity instance
The value of ``func(z)``.
zmin : float, optional
The lower search limit for ``z``. Beware of divergences
in some cosmological functions, such as distance moduli,
at z=0 (default 1e-8).
zmax : float, optional
The upper search limit for ``z`` (default 1000).
ztol : float, optional
The relative error in ``z`` acceptable for convergence.
maxfun : int, optional
The maximum number of function evaluations allowed in the
optimization routine (default 500).
Returns
-------
z : float
The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) =
fval`` within ``ztol``.
Notes
-----
This works for any arbitrary input cosmology, but is inefficient
if you want to invert a large number of values for the same
cosmology. In this case, it is faster to instead generate an array
of values at many closely-spaced redshifts that cover the relevant
redshift range, and then use interpolation to find the redshift at
each value you're interested in. For example, to efficiently find
the redshifts corresponding to 10^6 values of the distance modulus
in a Planck13 cosmology, you could do the following:
    >>> import numpy as np
    >>> import astropy.units as u
    >>> from astropy.cosmology import Planck13, z_at_value
    Generate 10^6 distance moduli between 24 and 43 for which we
    want to find the corresponding redshifts:
    >>> Dvals = (24 + np.random.rand(int(1e6)) * 20) * u.mag
Make a grid of distance moduli covering the redshift range we
need using 50 equally log-spaced values between zmin and
zmax. We use log spacing to adequately sample the steep part of
the curve at low distance moduli:
>>> zmin = z_at_value(Planck13.distmod, Dvals.min())
>>> zmax = z_at_value(Planck13.distmod, Dvals.max())
>>> zgrid = np.logspace(np.log10(zmin), np.log10(zmax), 50)
>>> Dgrid = Planck13.distmod(zgrid)
Finally interpolate to find the redshift at each distance modulus:
    >>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)
Examples
--------
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, z_at_value
The age and lookback time are monotonic with redshift, and so a
unique solution can be found:
>>> z_at_value(Planck13.age, 2 * u.Gyr)
3.19812268...
The angular diameter is not monotonic however, and there are two
redshifts that give a value of 1500 Mpc. Use the zmin and zmax keywords
to find the one you're interested in:
>>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, zmax=1.5)
0.6812769577...
>>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, zmin=2.5)
3.7914913242...
Also note that the luminosity distance and distance modulus (two
other commonly inverted quantities) are monotonic in flat and open
universes, but not in closed universes.
"""
from scipy.optimize import fminbound
fval_zmin = func(zmin)
fval_zmax = func(zmax)
if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval):
warnings.warn("""\
fval is not bracketed by func(zmin) and func(zmax). This means either
there is no solution, or that there is more than one solution between
zmin and zmax satisfying fval = func(z).""")
if isinstance(fval_zmin, Quantity):
val = fval.to_value(fval_zmin.unit)
f = lambda z: abs(func(z).value - val)
else:
f = lambda z: abs(func(z) - fval)
zbest, resval, ierr, ncall = fminbound(f, zmin, zmax, maxfun=maxfun,
full_output=1, xtol=ztol)
if ierr != 0:
warnings.warn('Maximum number of function calls ({}) reached'.format(
ncall))
if np.allclose(zbest, zmax):
raise CosmologyError("Best guess z is very close the upper z limit.\n"
"Try re-running with a different zmax.")
elif np.allclose(zbest, zmin):
raise CosmologyError("Best guess z is very close the lower z limit.\n"
"Try re-running with a different zmin.")
return zbest
|
a07beb2e850d0a0798b98527e160f4f1eb450b1efe1fa16ee58053efedeb1400 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
from astropy.stats.jackknife import jackknife_resampling, jackknife_stats
def test_jackknife_resampling():
data = np.array([1, 2, 3, 4])
answer = np.array([[2, 3, 4], [1, 3, 4], [1, 2, 4], [1, 2, 3]])
assert_equal(answer, jackknife_resampling(data))
# test jackknife stats, except confidence interval
@pytest.mark.skipif('not HAS_SCIPY')
def test_jackknife_stats():
# Test from the third example of Ref.[3]
data = np.array((115, 170, 142, 138, 280, 470, 480, 141, 390))
# true estimate, bias, and std_err
answer = (258.4444, 0.0, 50.25936)
assert_allclose(answer, jackknife_stats(data, np.mean)[0:3], atol=1e-4)
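# For reference, the jackknife standard error checked above follows the
# standard formula (a sketch, not taken from jackknife_stats itself):
# se = sqrt((n - 1) / n * sum_i (theta_i - theta_bar)**2), where theta_i are
# the leave-one-out estimates and theta_bar is their mean.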
# test jackknife stats, including confidence intervals
@pytest.mark.skipif('not HAS_SCIPY')
def test_jackknife_stats_conf_interval():
# Test from the first example of Ref.[3]
data = np.array([48, 42, 36, 33, 20, 16, 29, 39, 42, 38, 42, 36, 20, 15,
42, 33, 22, 20, 41, 43, 45, 34, 14, 22, 6, 7, 0, 15, 33,
34, 28, 29, 34, 41, 4, 13, 32, 38, 24, 25, 47, 27, 41, 41,
24, 28, 26, 14, 30, 28, 41, 40])
data = np.reshape(data, (-1, 2))
data = data[:, 1]
# true estimate, bias, and std_err
answer = (113.7862, -4.376391, 22.26572)
# calculate the mle of the variance (biased estimator!)
def mle_var(x): return np.sum((x - np.mean(x))*(x - np.mean(x)))/len(x)
assert_allclose(answer, jackknife_stats(data, mle_var, 0.95)[0:3],
atol=1e-4)
# test confidence interval
answer = np.array((70.14615, 157.42616))
assert_allclose(answer, jackknife_stats(data, mle_var, 0.95)[3], atol=1e-4)
|
6de3caae112baaa808eebcafbcc0b4bdd4d9287f645ec5973f39645eda8bbca0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats import (histogram, calculate_bin_edges,
scott_bin_width, freedman_bin_width, knuth_bin_width)
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_scott_bin_width(N=10000, rseed=0):
rng = np.random.RandomState(rseed)
X = rng.randn(N)
delta = scott_bin_width(X)
assert_allclose(delta, 3.5 * np.std(X) / N ** (1 / 3))
delta, bins = scott_bin_width(X, return_bins=True)
assert_allclose(delta, 3.5 * np.std(X) / N ** (1 / 3))
with pytest.raises(ValueError):
scott_bin_width(rng.rand(2, 10))
def test_freedman_bin_width(N=10000, rseed=0):
rng = np.random.RandomState(rseed)
X = rng.randn(N)
v25, v75 = np.percentile(X, [25, 75])
delta = freedman_bin_width(X)
assert_allclose(delta, 2 * (v75 - v25) / N ** (1 / 3))
delta, bins = freedman_bin_width(X, return_bins=True)
assert_allclose(delta, 2 * (v75 - v25) / N ** (1 / 3))
with pytest.raises(ValueError):
freedman_bin_width(rng.rand(2, 10))
# data with too small IQR
test_x = [1, 2, 3] + [4] * 100 + [5, 6, 7]
with pytest.raises(ValueError) as e:
freedman_bin_width(test_x, return_bins=True)
assert 'Please use another bin method' in str(e)
# data with small IQR but not too small
test_x = np.asarray([1, 2, 3] * 100 + [4] + [5, 6, 7], dtype=np.float32)
test_x *= 1.5e-6
delta, bins = freedman_bin_width(test_x, return_bins=True)
assert_allclose(delta, 8.923325554510689e-07)
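# For reference: the assertions above encode the Freedman-Diaconis rule,
# bin width h = 2 * IQR / n**(1/3), with IQR the interquartile range
# (v75 - v25).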
@pytest.mark.skipif('not HAS_SCIPY')
def test_knuth_bin_width(N=10000, rseed=0):
rng = np.random.RandomState(rseed)
X = rng.randn(N)
dx, bins = knuth_bin_width(X, return_bins=True)
assert_allclose(len(bins), 59)
dx2 = knuth_bin_width(X)
assert dx == dx2
with pytest.raises(ValueError):
knuth_bin_width(rng.rand(2, 10))
@pytest.mark.skipif('not HAS_SCIPY')
def test_knuth_histogram(N=1000, rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(N)
counts, bins = histogram(x, 'knuth')
assert (counts.sum() == len(x))
assert (len(counts) == len(bins) - 1)
_bin_types_to_test = [30, 'scott', 'freedman', 'blocks']
if HAS_SCIPY:
_bin_types_to_test += ['knuth']
@pytest.mark.parametrize('bin_type',
_bin_types_to_test + [np.linspace(-5, 5, 31)])
def test_histogram(bin_type, N=1000, rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(N)
counts, bins = histogram(x, bin_type)
assert (counts.sum() == len(x))
assert (len(counts) == len(bins) - 1)
# Don't include a list of bins as a bin_type here because the effect
# of range is different in that case
@pytest.mark.parametrize('bin_type', _bin_types_to_test)
def test_histogram_range(bin_type, N=1000, rseed=0):
# Regression test for #8010
rng = np.random.RandomState(rseed)
x = rng.randn(N)
range = (0.1, 0.8)
bins = calculate_bin_edges(x, bin_type, range=range)
assert bins.max() == range[1]
assert bins.min() == range[0]
def test_histogram_range_with_bins_list(N=1000, rseed=0):
# The expected result when the input bins is a list is
# the same list on output.
rng = np.random.RandomState(rseed)
x = rng.randn(N)
range = (0.1, 0.8)
input_bins = np.linspace(-5, 5, 31)
bins = calculate_bin_edges(x, input_bins, range=range)
assert all(bins == input_bins)
@pytest.mark.skipif('not HAS_SCIPY')
def test_histogram_output_knuth():
rng = np.random.RandomState(0)
X = rng.randn(100)
counts, bins = histogram(X, bins='knuth')
assert_allclose(counts, [1, 6, 9, 14, 21, 22, 12, 8, 7])
assert_allclose(bins, [-2.55298982, -2.01712932, -1.48126883, -0.94540834,
-0.40954784, 0.12631265, 0.66217314, 1.19803364,
1.73389413, 2.26975462])
def test_histogram_output():
rng = np.random.RandomState(0)
X = rng.randn(100)
counts, bins = histogram(X, bins=10)
assert_allclose(counts, [1, 5, 7, 13, 17, 18, 16, 11, 7, 5])
assert_allclose(bins, [-2.55298982, -2.07071537, -1.58844093, -1.10616648,
-0.62389204, -0.1416176, 0.34065685, 0.82293129,
1.30520574, 1.78748018, 2.26975462])
counts, bins = histogram(X, bins='scott')
assert_allclose(counts, [2, 13, 23, 34, 16, 10, 2])
assert_allclose(bins, [-2.55298982, -1.79299405, -1.03299829, -0.27300252,
0.48699324, 1.24698901, 2.00698477, 2.76698054])
counts, bins = histogram(X, bins='freedman')
assert_allclose(counts, [2, 7, 13, 20, 26, 14, 11, 5, 2])
assert_allclose(bins, [-2.55298982, -1.95796338, -1.36293694, -0.7679105,
-0.17288406, 0.42214237, 1.01716881, 1.61219525,
2.20722169, 2.80224813])
counts, bins = histogram(X, bins='blocks')
assert_allclose(counts, [10, 61, 29])
assert_allclose(bins, [-2.55298982, -1.24381059, 0.46422235, 2.26975462])
def test_histogram_badargs(N=1000, rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(N)
# weights is not supported
for bins in ['scott', 'freedman', 'blocks']:
with pytest.raises(NotImplementedError):
histogram(x, bins, weights=x)
# bad bins arg gives ValueError
with pytest.raises(ValueError):
histogram(x, bins='bad_argument')
|
dbeef5d058393cbebd6d4808686cfc2c65d22ee3e7d3f51a3dab58d2ed84d44f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
try:
from scipy import stats # used in testing
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
from astropy import units as u
from astropy.stats.sigma_clipping import sigma_clip, SigmaClip, sigma_clipped_stats
from astropy.utils.misc import NumpyRNGContext
def test_sigma_clip():
# need to seed the numpy RNG to make sure we don't get some
# amazingly flukey random number that breaks one of the tests
with NumpyRNGContext(12345):
# Amazing, I've got the same combination on my luggage!
randvar = np.random.randn(10000)
filtered_data = sigma_clip(randvar, sigma=1, maxiters=2)
assert sum(filtered_data.mask) > 0
assert sum(~filtered_data.mask) < randvar.size
        # this is actually a silly thing to do, because it uses the
        # variance as the standard deviation, but it tests to make sure
        # these arguments are actually doing something
filtered_data2 = sigma_clip(randvar, sigma=1, maxiters=2,
stdfunc=np.var)
assert not np.all(filtered_data.mask == filtered_data2.mask)
filtered_data3 = sigma_clip(randvar, sigma=1, maxiters=2,
cenfunc=np.mean)
assert not np.all(filtered_data.mask == filtered_data3.mask)
# make sure the maxiters=None method works at all.
filtered_data = sigma_clip(randvar, sigma=3, maxiters=None)
# test copying
assert filtered_data.data[0] == randvar[0]
filtered_data.data[0] += 1.
assert filtered_data.data[0] != randvar[0]
filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
copy=False)
assert filtered_data.data[0] == randvar[0]
filtered_data.data[0] += 1.
assert filtered_data.data[0] == randvar[0]
# test axis
data = np.arange(5) + np.random.normal(0., 0.05, (5, 5)) + \
np.diag(np.ones(5))
filtered_data = sigma_clip(data, axis=0, sigma=2.3)
assert filtered_data.count() == 20
filtered_data = sigma_clip(data, axis=1, sigma=2.3)
assert filtered_data.count() == 25
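# For reference (a sketch of the behavior exercised above): sigma_clip
# returns a numpy MaskedArray whose mask flags points more than `sigma`
# robust standard deviations from the center, e.g.
# sigma_clip([1., 2., 2., 100.], sigma=2) would typically mask the 100.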
@pytest.mark.skipif('not HAS_SCIPY')
def test_compare_to_scipy_sigmaclip():
# need to seed the numpy RNG to make sure we don't get some
# amazingly flukey random number that breaks one of the tests
with NumpyRNGContext(12345):
randvar = np.random.randn(10000)
astropyres = sigma_clip(randvar, sigma=3, maxiters=None,
cenfunc=np.mean)
scipyres = stats.sigmaclip(randvar, 3, 3)[0]
assert astropyres.count() == len(scipyres)
assert_equal(astropyres[~astropyres.mask].data, scipyres)
def test_sigma_clip_scalar_mask():
"""Test that the returned mask is not a scalar."""
data = np.arange(5)
result = sigma_clip(data, sigma=100., maxiters=1)
assert result.mask.shape != ()
def test_sigma_clip_class():
with NumpyRNGContext(12345):
data = np.random.randn(100)
data[10] = 1.e5
sobj = SigmaClip(sigma=1, maxiters=2)
sfunc = sigma_clip(data, sigma=1, maxiters=2)
assert_equal(sobj(data), sfunc)
def test_sigma_clip_mean():
with NumpyRNGContext(12345):
data = np.random.normal(0., 0.05, (10, 10))
data[2, 2] = 1.e5
sobj1 = SigmaClip(sigma=1, maxiters=2, cenfunc='mean')
sobj2 = SigmaClip(sigma=1, maxiters=2, cenfunc=np.nanmean)
assert_equal(sobj1(data), sobj2(data))
assert_equal(sobj1(data, axis=0), sobj2(data, axis=0))
def test_sigma_clip_invalid_cenfunc_stdfunc():
with pytest.raises(ValueError):
SigmaClip(cenfunc='invalid')
with pytest.raises(ValueError):
SigmaClip(stdfunc='invalid')
def test_sigma_clipped_stats():
"""Test list data with input mask or mask_value (#3268)."""
# test list data with mask
data = [0, 1]
mask = np.array([True, False])
result = sigma_clipped_stats(data, mask=mask)
# Check that the result of np.ma.median was converted to a scalar
assert isinstance(result[1], float)
assert result == (1., 1., 0.)
result2 = sigma_clipped_stats(data, mask=mask, axis=0)
assert_equal(result, result2)
# test list data with mask_value
result = sigma_clipped_stats(data, mask_value=0.)
assert isinstance(result[1], float)
assert result == (1., 1., 0.)
# test without mask
data = [0, 2]
result = sigma_clipped_stats(data)
assert isinstance(result[1], float)
assert result == (1., 1., 1.)
_data = np.arange(10)
data = np.ma.MaskedArray([_data, _data, 10 * _data])
mean = sigma_clip(data, axis=0, sigma=1).mean(axis=0)
assert_equal(mean, _data)
mean, median, stddev = sigma_clipped_stats(data, axis=0, sigma=1)
assert_equal(mean, _data)
assert_equal(median, _data)
assert_equal(stddev, np.zeros_like(_data))
def test_sigma_clipped_stats_ddof():
with NumpyRNGContext(12345):
data = np.random.randn(10000)
data[10] = 1.e5
mean1, median1, stddev1 = sigma_clipped_stats(data)
mean2, median2, stddev2 = sigma_clipped_stats(data, std_ddof=1)
assert mean1 == mean2
assert median1 == median2
assert_allclose(stddev1, 0.98156805711673156)
assert_allclose(stddev2, 0.98161731654802831)
def test_invalid_sigma_clip():
"""Test sigma_clip of data containing invalid values."""
data = np.ones((5, 5))
data[2, 2] = 1000
data[3, 4] = np.nan
data[1, 1] = np.inf
result = sigma_clip(data)
# Pre #4051 if data contains any NaN or infs sigma_clip returns the
# mask containing `False` only or TypeError if data also contains a
# masked value.
assert result.mask[2, 2]
assert result.mask[3, 4]
assert result.mask[1, 1]
result2 = sigma_clip(data, axis=0)
assert result2.mask[1, 1]
assert result2.mask[3, 4]
result3 = sigma_clip(data, axis=0, copy=False)
assert result3.mask[1, 1]
assert result3.mask[3, 4]
# stats along axis with all nans
data[0, :] = np.nan # row of all nans
result4, minarr, maxarr = sigma_clip(data, axis=1, masked=False,
return_bounds=True)
assert np.isnan(minarr[0])
assert np.isnan(maxarr[0])
def test_sigmaclip_negative_axis():
"""Test that dimensions are expanded correctly even if axis is negative."""
data = np.ones((3, 4))
# without correct expand_dims this would raise a ValueError
sigma_clip(data, axis=-1)
def test_sigmaclip_fully_masked():
"""Make sure a fully masked array is returned when sigma clipping a fully
masked array.
"""
data = np.ma.MaskedArray(data=[[1., 0.], [0., 1.]],
mask=[[True, True], [True, True]])
clipped_data = sigma_clip(data)
np.ma.allequal(data, clipped_data)
def test_sigmaclip_empty_masked():
"""Make sure a empty masked array is returned when sigma clipping an empty
masked array.
"""
data = np.ma.MaskedArray(data=[], mask=[])
clipped_data = sigma_clip(data)
np.ma.allequal(data, clipped_data)
def test_sigmaclip_empty():
"""Make sure a empty array is returned when sigma clipping an empty array.
"""
data = np.array([])
clipped_data = sigma_clip(data)
assert_equal(data, clipped_data)
def test_sigma_clip_axis_tuple_3D():
"""Test sigma clipping over a subset of axes (issue #7227).
"""
data = np.sin(0.78 * np.arange(27)).reshape(3, 3, 3)
    mask = np.zeros_like(data, dtype=bool)
data_t = np.rollaxis(data, 1, 0)
mask_t = np.rollaxis(mask, 1, 0)
# Loop over what was originally axis 1 and clip each plane directly:
for data_plane, mask_plane in zip(data_t, mask_t):
mean = data_plane.mean()
maxdev = 1.5 * data_plane.std()
mask_plane[:] = np.logical_or(data_plane < mean - maxdev,
data_plane > mean + maxdev)
# Do the equivalent thing using sigma_clip:
result = sigma_clip(data, sigma=1.5, cenfunc=np.mean, maxiters=1,
axis=(0, -1))
assert_equal(result.mask, mask)
def test_sigmaclip_repr():
sigclip = SigmaClip()
sigclip_repr = ('SigmaClip(sigma=3.0, sigma_lower=3.0, sigma_upper=3.0,'
' maxiters=5, cenfunc=')
sigclip_str = ('<SigmaClip>\n sigma: 3.0\n sigma_lower: 3.0\n'
' sigma_upper: 3.0\n maxiters: 5\n cenfunc: ')
assert repr(sigclip).startswith(sigclip_repr)
assert str(sigclip).startswith(sigclip_str)
def test_sigma_clipped_stats_unit():
data = np.array([1, 1]) * u.kpc
result = sigma_clipped_stats(data)
assert result == (1. * u.kpc, 1. * u.kpc, 0. * u.kpc)
|
c78b36e2131d12f784ba3b0548251dfee520ec789b0f46dc35e01321d456f4ef |
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from astropy import units as u
try:
import scipy.stats
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
from astropy.stats.circstats import _length, circmean, circvar, circmoment, circcorrcoef
from astropy.stats.circstats import rayleightest, vtest, vonmisesmle
def test__length():
# testing against R CircStats package
# Ref. [1] pages 6 and 125
weights = np.array([12, 1, 6, 1, 2, 1, 1])
answer = 0.766282
data = np.array([0, 3.6, 36, 72, 108, 169.2, 324])*u.deg
assert_allclose(answer, _length(data, weights=weights), atol=1e-4)
def test_circmean():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
answer = 48.63*u.deg
assert_equal(answer, np.around(circmean(data), 2))
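# For reference (a sketch of the standard definition, cf. Ref[1]): the
# circular mean tested above is the direction of the resultant vector,
# theta_bar = atan2(sum_i sin(theta_i), sum_i cos(theta_i)).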
@pytest.mark.skipif('not HAS_SCIPY')
def test_circmean_against_scipy():
# testing against scipy.stats.circmean function
# the data is the same as the test before, but in radians
data = np.array([0.89011792, 1.1693706, 0.6981317, 1.90240888, 0.54105207,
6.24827872])
answer = scipy.stats.circmean(data)
assert_equal(np.around(answer, 2), np.around(circmean(data), 2))
def test_circvar():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
answer = 0.1635635
assert_allclose(answer, circvar(data), atol=1e-4)
def test_circmoment():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
# 2nd, 3rd, and 4th moments
# this is the answer given in Ref[1] in radians
answer = np.array([1.588121, 1.963919, 2.685556])
answer = np.around(np.rad2deg(answer)*u.deg, 4)
result = (np.around(circmoment(data, p=2)[0], 4),
np.around(circmoment(data, p=3)[0], 4),
np.around(circmoment(data, p=4)[0], 4))
assert_equal(answer[0], result[0])
assert_equal(answer[1], result[1])
assert_equal(answer[2], result[2])
# testing lengths
answer = np.array([0.4800428, 0.236541, 0.2255761])
assert_allclose(answer, (circmoment(data, p=2)[1],
circmoment(data, p=3)[1],
circmoment(data, p=4)[1]), atol=1e-4)
def test_circcorrcoef():
# testing against R CircStats package
# Ref[1], page 180
alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302, 324,
85, 324, 340, 157, 238, 254, 146, 232, 122, 329])*u.deg
beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94, 45,
47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
answer = 0.2704648
assert_allclose(answer, circcorrcoef(alpha, beta), atol=1e-4)
def test_rayleightest():
# testing against R CircStats package
data = np.array([190.18, 175.48, 155.95, 217.83, 156.36])*u.deg
# answer was obtained through R CircStats function r.test(x)
answer = (0.00640418, 0.9202565)
result = (rayleightest(data), _length(data))
assert_allclose(answer[0], result[0], atol=1e-4)
assert_allclose(answer[1], result[1], atol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_vtest():
# testing against R CircStats package
data = np.array([190.18, 175.48, 155.95, 217.83, 156.36])*u.deg
# answer was obtained through R CircStats function v0.test(x)
answer = 0.9994725
assert_allclose(answer, vtest(data), atol=1e-5)
def test_vonmisesmle():
# testing against R CircStats package
# testing non-Quantity
data = np.array([3.3699057, 4.0411630, 0.5014477, 2.6223103, 3.7336524,
1.8136389, 4.1566039, 2.7806317, 2.4672173,
2.8493644])
# answer was obtained through R CircStats function vm.ml(x)
answer = (3.006514, 1.474132)
assert_allclose(answer[0], vonmisesmle(data)[0], atol=1e-5)
assert_allclose(answer[1], vonmisesmle(data)[1], atol=1e-5)
# testing with Quantity
data = np.rad2deg(data)*u.deg
answer = np.rad2deg(3.006514)*u.deg
assert_equal(np.around(answer, 3), np.around(vonmisesmle(data)[0], 3))
|
2d56091a9752859180dc8c6bd7810d77ace40c9b81046d56f10c92deafbfa3b5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats import bayesian_blocks, RegularEvents
def test_single_change_point(rseed=0):
rng = np.random.RandomState(rseed)
x = np.concatenate([rng.rand(100),
1 + rng.rand(200)])
bins = bayesian_blocks(x)
assert (len(bins) == 3)
assert_allclose(bins[1], 1, rtol=0.02)
def test_duplicate_events(rseed=0):
rng = np.random.RandomState(rseed)
t = rng.rand(100)
t[80:] = t[:20]
x = np.ones_like(t)
x[:20] += 1
bins1 = bayesian_blocks(t)
bins2 = bayesian_blocks(t[:80], x[:80])
assert_allclose(bins1, bins2)
def test_measures_fitness_homoscedastic(rseed=0):
rng = np.random.RandomState(rseed)
t = np.linspace(0, 1, 11)
x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
sigma = 0.05
x = x + sigma * rng.randn(len(x))
bins = bayesian_blocks(t, x, sigma, fitness='measures')
assert_allclose(bins, [0, 0.45, 0.55, 1])
def test_measures_fitness_heteroscedastic():
rng = np.random.RandomState(1)
t = np.linspace(0, 1, 11)
x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
sigma = 0.02 + 0.02 * rng.rand(len(x))
x = x + sigma * rng.randn(len(x))
bins = bayesian_blocks(t, x, sigma, fitness='measures')
assert_allclose(bins, [0, 0.45, 0.55, 1])
def test_regular_events():
rng = np.random.RandomState(0)
dt = 0.01
steps = np.concatenate([np.unique(rng.randint(0, 500, 100)),
np.unique(rng.randint(500, 1000, 200))])
t = dt * steps
# string fitness
bins1 = bayesian_blocks(t, fitness='regular_events', dt=dt)
assert (len(bins1) == 3)
assert_allclose(bins1[1], 5, rtol=0.05)
# class name fitness
bins2 = bayesian_blocks(t, fitness=RegularEvents, dt=dt)
assert_allclose(bins1, bins2)
# class instance fitness
bins3 = bayesian_blocks(t, fitness=RegularEvents(dt=dt))
assert_allclose(bins1, bins3)
def test_errors():
rng = np.random.RandomState(0)
t = rng.rand(100)
# x must be integer or None for events
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='events', x=t)
# x must be binary for regular events
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='regular_events', x=10 * t, dt=1)
# x must be specified for measures
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures')
# sigma cannot be specified without x
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='events', sigma=0.5)
# length of x must match length of t
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures', x=t[:-1])
# repeated values in t fail when x is specified
t2 = t.copy()
t2[1] = t2[0]
with pytest.raises(ValueError):
bayesian_blocks(t2, fitness='measures', x=t)
# sigma must be broadcastable with x
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures', x=t, sigma=t[:-1])
def test_fitness_function_results():
"""Test results for several fitness functions"""
rng = np.random.RandomState(42)
# Event Data
t = rng.randn(100)
edges = bayesian_blocks(t, fitness='events')
assert_allclose(edges, [-2.6197451, -0.71094865, 0.36866702, 1.85227818])
# Event data with repeats
t[80:] = t[:20]
edges = bayesian_blocks(t, fitness='events', p0=0.01)
assert_allclose(edges, [-2.6197451, -0.47432431, -0.46202823, 1.85227818])
# Regular event data
dt = 0.01
t = dt * np.arange(1000)
x = np.zeros(len(t))
N = len(t) // 10
x[rng.randint(0, len(t), N)] = 1
x[rng.randint(0, len(t) // 2, N)] = 1
edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
assert_allclose(edges, [0, 5.105, 9.99])
# Measured point data with errors
t = 100 * rng.rand(20)
x = np.exp(-0.5 * (t - 50) ** 2)
sigma = 0.1
x_obs = x + sigma * rng.randn(len(x))
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
expected = [4.360377, 48.456895, 52.597917, 99.455051]
assert_allclose(edges, expected)
# Optional arguments are passed (p0)
p0_sel = 0.05
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures', p0=p0_sel)
assert_allclose(edges, expected)
# Optional arguments are passed (ncp_prior)
ncp_prior_sel = 4 - np.log(73.53 * p0_sel * (len(t) ** -0.478))
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures',
ncp_prior=ncp_prior_sel)
assert_allclose(edges, expected)
# Optional arguments are passed (gamma)
gamma_sel = np.exp(-ncp_prior_sel)
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures',
gamma=gamma_sel)
assert_allclose(edges, expected)
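# The three runs above agree because the options are related: ncp_prior is
# derived from p0 via the empirical formula used for ncp_prior_sel, and
# gamma = exp(-ncp_prior), so all three parametrize the same prior.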
|
c9760f23538e1418d2690cdc41fc5b70ec75e8162430feeb9bf62910656da6bd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
try:
import mpmath # pylint: disable=W0611
except ImportError:
HAS_MPMATH = False
else:
HAS_MPMATH = True
from astropy.stats import funcs
from astropy import units as u
from astropy.tests.helper import catch_warnings
from astropy.utils.misc import NumpyRNGContext
def test_median_absolute_deviation():
with NumpyRNGContext(12345):
# test that it runs
randvar = np.random.randn(10000)
mad = funcs.median_absolute_deviation(randvar)
# test whether an array is returned if an axis is used
randvar = randvar.reshape((10, 1000))
mad = funcs.median_absolute_deviation(randvar, axis=1)
assert len(mad) == 10
assert mad.size < randvar.size
mad = funcs.median_absolute_deviation(randvar, axis=0)
assert len(mad) == 1000
assert mad.size < randvar.size
# Test some actual values in a 3 dimensional array
x = np.arange(3 * 4 * 5)
a = np.array([sum(x[:i + 1]) for i in range(len(x))]).reshape(3, 4, 5)
mad = funcs.median_absolute_deviation(a)
assert mad == 389.5
mad = funcs.median_absolute_deviation(a, axis=0)
assert_allclose(mad, [[210., 230., 250., 270., 290.],
[310., 330., 350., 370., 390.],
[410., 430., 450., 470., 490.],
[510., 530., 550., 570., 590.]])
mad = funcs.median_absolute_deviation(a, axis=1)
assert_allclose(mad, [[27.5, 32.5, 37.5, 42.5, 47.5],
[127.5, 132.5, 137.5, 142.5, 147.5],
[227.5, 232.5, 237.5, 242.5, 247.5]])
mad = funcs.median_absolute_deviation(a, axis=2)
assert_allclose(mad, [[3., 8., 13., 18.],
[23., 28., 33., 38.],
[43., 48., 53., 58.]])
def test_median_absolute_deviation_masked():
    # Based on the changes introduced in #4658
# normal masked arrays without masked values are handled like normal
# numpy arrays
array = np.ma.array([1, 2, 3])
assert funcs.median_absolute_deviation(array) == 1
# masked numpy arrays return something different (rank 0 masked array)
# but one can still compare it without np.all!
array = np.ma.array([1, 4, 3], mask=[0, 1, 0])
assert funcs.median_absolute_deviation(array) == 1
# Just cross check if that's identical to the function on the unmasked
# values only
assert funcs.median_absolute_deviation(array) == (
funcs.median_absolute_deviation(array[~array.mask]))
# Multidimensional masked array
array = np.ma.array([[1, 4], [2, 2]], mask=[[1, 0], [0, 0]])
funcs.median_absolute_deviation(array)
assert funcs.median_absolute_deviation(array) == 0
# Just to compare it with the data without mask:
assert funcs.median_absolute_deviation(array.data) == 0.5
# And check if they are also broadcasted correctly
np.testing.assert_array_equal(
funcs.median_absolute_deviation(array, axis=0).data, [0, 1])
np.testing.assert_array_equal(
funcs.median_absolute_deviation(array, axis=1).data, [0, 0])
def test_median_absolute_deviation_nans():
array = np.array([[1, 4, 3, np.nan],
[2, 5, np.nan, 4]])
assert_equal(funcs.median_absolute_deviation(array, func=np.nanmedian,
axis=1), [1, 1])
array = np.ma.masked_invalid(array)
assert funcs.median_absolute_deviation(array) == 1
def test_median_absolute_deviation_multidim_axis():
array = np.ones((5, 4, 3)) * np.arange(5)[:, np.newaxis, np.newaxis]
assert_equal(funcs.median_absolute_deviation(array, axis=(1, 2)),
np.zeros(5))
assert_equal(funcs.median_absolute_deviation(
array, axis=np.array([1, 2])), np.zeros(5))
def test_median_absolute_deviation_quantity():
    # Based on the changes introduced in #4658
# Just a small test that this function accepts Quantities and returns a
# quantity
a = np.array([1, 16, 5]) * u.m
mad = funcs.median_absolute_deviation(a)
# Check for the correct unit and that the result is identical to the
# result without units.
assert mad.unit == a.unit
assert mad.value == funcs.median_absolute_deviation(a.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_binom_conf_interval():
# Test Wilson and Jeffreys interval for corner cases:
# Corner cases: k = 0, k = n, conf = 0., conf = 1.
n = 5
k = [0, 4, 5]
for conf in [0., 0.5, 1.]:
res = funcs.binom_conf_interval(k, n, conf=conf, interval='wilson')
assert ((res >= 0.) & (res <= 1.)).all()
res = funcs.binom_conf_interval(k, n, conf=conf, interval='jeffreys')
assert ((res >= 0.) & (res <= 1.)).all()
# Test Jeffreys interval accuracy against table in Brown et al. (2001).
# (See `binom_conf_interval` docstring for reference.)
k = [0, 1, 2, 3, 4]
n = 7
conf = 0.95
result = funcs.binom_conf_interval(k, n, conf=conf, interval='jeffreys')
table = np.array([[0.000, 0.016, 0.065, 0.139, 0.234],
[0.292, 0.501, 0.648, 0.766, 0.861]])
assert_allclose(result, table, atol=1.e-3, rtol=0.)
# Test scalar version
result = np.array([funcs.binom_conf_interval(kval, n, conf=conf,
interval='jeffreys')
for kval in k]).transpose()
assert_allclose(result, table, atol=1.e-3, rtol=0.)
# Test flat
result = funcs.binom_conf_interval(k, n, conf=conf, interval='flat')
table = np.array([[0., 0.03185, 0.08523, 0.15701, 0.24486],
[0.36941, 0.52650, 0.65085, 0.75513, 0.84298]])
assert_allclose(result, table, atol=1.e-3, rtol=0.)
# Test scalar version
result = np.array([funcs.binom_conf_interval(kval, n, conf=conf,
interval='flat')
for kval in k]).transpose()
assert_allclose(result, table, atol=1.e-3, rtol=0.)
# Test Wald interval
result = funcs.binom_conf_interval(0, 5, interval='wald')
assert_allclose(result, 0.) # conf interval is [0, 0] when k = 0
result = funcs.binom_conf_interval(5, 5, interval='wald')
assert_allclose(result, 1.) # conf interval is [1, 1] when k = n
result = funcs.binom_conf_interval(500, 1000, conf=0.68269,
interval='wald')
assert_allclose(result[0], 0.5 - 0.5 / np.sqrt(1000.))
assert_allclose(result[1], 0.5 + 0.5 / np.sqrt(1000.))
# Test shapes
k = 3
n = 7
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
k = np.array(k)
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
n = np.array(n)
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
k = np.array([1, 3, 5])
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2, 3)
n = np.array([5, 5, 5])
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2, 3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_binned_binom_proportion():
# Check that it works.
nbins = 20
x = np.linspace(0., 10., 100) # Guarantee an `x` in every bin.
success = np.ones(len(x), dtype=bool)
bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success,
bins=nbins)
# Check shape of outputs
assert bin_ctr.shape == (nbins,)
assert bin_hw.shape == (nbins,)
assert p.shape == (nbins,)
assert perr.shape == (2, nbins)
# Check that p is 1 in all bins, since success = True for all `x`.
assert (p == 1.).all()
# Check that p is 0 in all bins if success = False for all `x`.
success[:] = False
bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success,
bins=nbins)
assert (p == 0.).all()
def test_signal_to_noise_oir_ccd():
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 0, 1)
assert 5.0 == result
# check to make sure gain works
result = funcs.signal_to_noise_oir_ccd(1, 5, 0, 0, 0, 1, 5)
assert 5.0 == result
# now add in sky, dark current, and read noise
# make sure the snr goes down
result = funcs.signal_to_noise_oir_ccd(1, 25, 1, 0, 0, 1)
assert result < 5.0
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 1, 0, 1)
assert result < 5.0
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 1, 1)
assert result < 5.0
# make sure snr increases with time
result = funcs.signal_to_noise_oir_ccd(2, 25, 0, 0, 0, 1)
assert result > 5.0
def test_bootstrap():
bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
# test general bootstrapping
answer = np.array([[7, 4, 8, 5, 7, 0, 3, 7, 8, 5],
[4, 8, 8, 3, 6, 5, 2, 8, 6, 2]])
with NumpyRNGContext(42):
assert_equal(answer, funcs.bootstrap(bootarr, 2))
# test with a bootfunction
with NumpyRNGContext(42):
bootresult = np.mean(funcs.bootstrap(bootarr, 10000, bootfunc=np.mean))
assert_allclose(np.mean(bootarr), bootresult, atol=0.01)
@pytest.mark.skipif('not HAS_SCIPY')
def test_bootstrap_multiple_outputs():
from scipy.stats import spearmanr
# test a bootfunc with several output values
# return just bootstrapping with one output from bootfunc
with NumpyRNGContext(42):
bootarr = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
[4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]).T
answer = np.array((0.19425, 0.02094))
def bootfunc(x): return spearmanr(x)[0]
bootresult = funcs.bootstrap(bootarr, 2,
bootfunc=bootfunc)
assert_allclose(answer, bootresult, atol=1e-3)
# test a bootfunc with several output values
# return just bootstrapping with the second output from bootfunc
with NumpyRNGContext(42):
bootarr = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
[4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]).T
answer = np.array((0.5907,
0.9541))
def bootfunc(x): return spearmanr(x)[1]
bootresult = funcs.bootstrap(bootarr, 2,
bootfunc=bootfunc)
assert_allclose(answer, bootresult, atol=1e-3)
# return just bootstrapping with two outputs from bootfunc
with NumpyRNGContext(42):
answer = np.array(((0.1942, 0.5907),
(0.0209, 0.9541),
(0.4286, 0.2165)))
def bootfunc(x): return spearmanr(x)
bootresult = funcs.bootstrap(bootarr, 3,
bootfunc=bootfunc)
assert bootresult.shape == (3, 2)
assert_allclose(answer, bootresult, atol=1e-3)
def test_mad_std():
with NumpyRNGContext(12345):
data = np.random.normal(5, 2, size=(100, 100))
assert_allclose(funcs.mad_std(data), 2.0, rtol=0.05)
def test_mad_std_scalar_return():
with NumpyRNGContext(12345):
data = np.random.normal(5, 2, size=(10, 10))
# make a masked array with no masked points
data = np.ma.masked_where(np.isnan(data), data)
rslt = funcs.mad_std(data)
# want a scalar result, NOT a masked array
assert np.isscalar(rslt)
data[5, 5] = np.nan
rslt = funcs.mad_std(data, ignore_nan=True)
assert np.isscalar(rslt)
with catch_warnings():
rslt = funcs.mad_std(data)
assert np.isscalar(rslt)
try:
assert not np.isnan(rslt)
# This might not be an issue anymore when only numpy>=1.13 is
# supported. NUMPY_LT_1_13 xref #7267
except AssertionError:
pytest.xfail('See #5232')
def test_mad_std_warns():
with NumpyRNGContext(12345):
data = np.random.normal(5, 2, size=(10, 10))
data[5, 5] = np.nan
with catch_warnings() as warns:
rslt = funcs.mad_std(data, ignore_nan=False)
assert np.isnan(rslt)
def test_mad_std_withnan():
with NumpyRNGContext(12345):
data = np.empty([102, 102])
data[:] = np.nan
data[1:-1, 1:-1] = np.random.normal(5, 2, size=(100, 100))
assert_allclose(funcs.mad_std(data, ignore_nan=True), 2.0, rtol=0.05)
assert np.isnan(funcs.mad_std([1, 2, 3, 4, 5, np.nan]))
assert_allclose(funcs.mad_std([1, 2, 3, 4, 5, np.nan], ignore_nan=True),
1.482602218505602)
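# For reference: the constant checked above is 1 / Phi^{-1}(3/4) ~ 1.4826,
# i.e. mad_std(x) ~ 1.4826 * MAD(x), which rescales the median absolute
# deviation to estimate the standard deviation of Gaussian data.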
def test_mad_std_with_axis():
data = np.array([[1, 2, 3, 4],
[4, 3, 2, 1]])
# results follow data symmetry
result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111,
2.22390333])
result_axis1 = np.array([1.48260222, 1.48260222])
assert_allclose(funcs.mad_std(data, axis=0), result_axis0)
assert_allclose(funcs.mad_std(data, axis=1), result_axis1)
def test_mad_std_with_axis_and_nan():
data = np.array([[1, 2, 3, 4, np.nan],
[4, 3, 2, 1, np.nan]])
# results follow data symmetry
result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111,
2.22390333, np.nan])
result_axis1 = np.array([1.48260222, 1.48260222])
assert_allclose(funcs.mad_std(data, axis=0, ignore_nan=True), result_axis0)
assert_allclose(funcs.mad_std(data, axis=1, ignore_nan=True), result_axis1)
def test_mad_std_with_axis_and_nan_array_type():
# mad_std should return a masked array if given one, and not otherwise
data = np.array([[1, 2, 3, 4, np.nan],
[4, 3, 2, 1, np.nan]])
result = funcs.mad_std(data, axis=0, ignore_nan=True)
assert not np.ma.isMaskedArray(result)
data = np.ma.masked_where(np.isnan(data), data)
result = funcs.mad_std(data, axis=0, ignore_nan=True)
assert np.ma.isMaskedArray(result)
def test_gaussian_fwhm_to_sigma():
fwhm = (2.0 * np.sqrt(2.0 * np.log(2.0)))
assert_allclose(funcs.gaussian_fwhm_to_sigma * fwhm, 1.0, rtol=1.0e-6)
def test_gaussian_sigma_to_fwhm():
sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))
assert_allclose(funcs.gaussian_sigma_to_fwhm * sigma, 1.0, rtol=1.0e-6)
def test_gaussian_sigma_to_fwhm_to_sigma():
assert_allclose(funcs.gaussian_fwhm_to_sigma *
funcs.gaussian_sigma_to_fwhm, 1.0)
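# For reference: for a Gaussian, FWHM = 2 * sqrt(2 * ln(2)) * sigma
# ~ 2.3548 * sigma, the relation encoded by both conversion constants above.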
def test_poisson_conf_interval_rootn():
assert_allclose(funcs.poisson_conf_interval(16, interval='root-n'),
(12, 20))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('interval', ['root-n-0',
'pearson',
'sherpagehrels',
'frequentist-confidence'])
def test_poisson_conf_large(interval):
n = 100
assert_allclose(funcs.poisson_conf_interval(n, interval='root-n'),
funcs.poisson_conf_interval(n, interval=interval),
rtol=2e-2)
def test_poisson_conf_array_rootn0_zero():
n = np.zeros((3, 4, 5))
assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
funcs.poisson_conf_interval(n[0, 0, 0], interval='root-n-0')[:, None, None, None] * np.ones_like(n))
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='root-n-0')))
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_array_frequentist_confidence_zero():
n = np.zeros((3, 4, 5))
assert_allclose(
funcs.poisson_conf_interval(n, interval='frequentist-confidence'),
funcs.poisson_conf_interval(n[0, 0, 0], interval='frequentist-confidence')[:, None, None, None] * np.ones_like(n))
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='root-n-0')))
def test_poisson_conf_list_rootn0_zero():
n = [0, 0, 0]
assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
[[0, 0, 0], [1, 1, 1]])
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='root-n-0')))
def test_poisson_conf_array_rootn0():
n = 7 * np.ones((3, 4, 5))
assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
funcs.poisson_conf_interval(n[0, 0, 0], interval='root-n-0')[:, None, None, None] * np.ones_like(n))
n[1, 2, 3] = 0
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='root-n-0')))
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_array_fc():
n = 7 * np.ones((3, 4, 5))
assert_allclose(
funcs.poisson_conf_interval(n, interval='frequentist-confidence'),
funcs.poisson_conf_interval(n[0, 0, 0], interval='frequentist-confidence')[:, None, None, None] * np.ones_like(n))
n[1, 2, 3] = 0
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='frequentist-confidence')))
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels():
"""Test intervals against those published in Gehrels 1986"""
nlh = np.array([(0, 0, 1.841),
(1, 0.173, 3.300),
(2, 0.708, 4.638),
(3, 1.367, 5.918),
(4, 2.086, 7.163),
(5, 2.840, 8.382),
(6, 3.620, 9.584),
(7, 4.419, 10.77),
(8, 5.232, 11.95),
(9, 6.057, 13.11),
(10, 6.891, 14.27),
])
assert_allclose(
funcs.poisson_conf_interval(nlh[:, 0],
interval='frequentist-confidence'),
nlh[:, 1:].T, rtol=0.001, atol=0.001)
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels_2sigma():
"""Test intervals against those published in Gehrels 1986
Note: I think there's a typo (transposition of digits) in Gehrels 1986,
specifically for the two-sigma lower limit for 3 events; they claim
0.569 but this function returns 0.59623...
"""
nlh = np.array([(0, 2, 0, 3.783),
(1, 2, 2.30e-2, 5.683),
(2, 2, 0.230, 7.348),
(3, 2, 0.596, 8.902),
(4, 2, 1.058, 10.39),
(5, 2, 1.583, 11.82),
(6, 2, 2.153, 13.22),
(7, 2, 2.758, 14.59),
(8, 2, 3.391, 15.94),
(9, 2, 4.046, 17.27),
(10, 2, 4.719, 18.58)])
assert_allclose(
funcs.poisson_conf_interval(nlh[:, 0], sigma=2,
interval='frequentist-confidence').T,
nlh[:, 2:], rtol=0.01)
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels_3sigma():
"""Test intervals against those published in Gehrels 1986"""
nlh = np.array([(0, 3, 0, 6.608),
(1, 3, 1.35e-3, 8.900),
(2, 3, 5.29e-2, 10.87),
(3, 3, 0.212, 12.68),
(4, 3, 0.465, 14.39),
(5, 3, 0.792, 16.03),
(6, 3, 1.175, 17.62),
(7, 3, 1.603, 19.17),
(8, 3, 2.068, 20.69),
(9, 3, 2.563, 22.18),
(10, 3, 3.084, 23.64),
])
assert_allclose(
funcs.poisson_conf_interval(nlh[:, 0], sigma=3,
interval='frequentist-confidence').T,
nlh[:, 2:], rtol=0.01, verbose=True)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('n', [0, 1, 2, 3, 10, 20, 100])
def test_poisson_conf_gehrels86(n):
assert_allclose(
funcs.poisson_conf_interval(n, interval='sherpagehrels')[1],
funcs.poisson_conf_interval(n, interval='frequentist-confidence')[1],
rtol=0.02)
@pytest.mark.skipif('not HAS_SCIPY')
def test_scipy_poisson_limit():
    '''Test that the lower-level routine gives the same numbers.
    Test numbers are from Tables 1 and 3 in
    Kraft, Burrows and Nousek,
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_
'''
assert_allclose(funcs._scipy_kraft_burrows_nousek(5., 2.5, .99),
(0, 10.67), rtol=1e-3)
conf = funcs.poisson_conf_interval([5., 6.], 'kraft-burrows-nousek',
background=[2.5, 2.],
conflevel=[.99, .9])
assert_allclose(conf[:, 0], (0, 10.67), rtol=1e-3)
assert_allclose(conf[:, 1], (0.81, 8.99), rtol=5e-3)
@pytest.mark.skipif('not HAS_MPMATH')
def test_mpmath_poisson_limit():
assert_allclose(funcs._mpmath_kraft_burrows_nousek(6., 2., .9),
(0.81, 8.99), rtol=5e-3)
assert_allclose(funcs._mpmath_kraft_burrows_nousek(5., 2.5, .99),
(0, 10.67), rtol=1e-3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_value_errors():
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval([5, 6], 'root-n', sigma=2)
assert 'Only sigma=1 supported' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval([5, 6], 'pearson', background=[2.5, 2.])
assert 'background not supported' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval([5, 6], 'sherpagehrels',
conflevel=[2.5, 2.])
assert 'conflevel not supported' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval(1, 'foo')
assert 'Invalid method' in str(e.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_kbn_value_errors():
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
background=2.5,
conflevel=99)
assert 'number between 0 and 1' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
background=2.5)
assert 'Set conflevel for method' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
background=-2.5,
conflevel=.99)
assert 'Background must be' in str(e.value)
@pytest.mark.skipif('HAS_SCIPY or HAS_MPMATH')
def test_poisson_limit_nodependencies():
with pytest.raises(ImportError):
funcs.poisson_conf_interval(20., interval='kraft-burrows-nousek',
background=10., conflevel=.95)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N', [10, 100, 1000, 10000])
def test_uniform(N):
with NumpyRNGContext(12345):
assert funcs.kuiper(np.random.random(N))[1] > 0.01
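# For reference (a sketch of the standard definition): the Kuiper statistic
# is V = D+ + D-, the sum of the maximum deviations above and below the
# reference CDF; the second element of the return value used above is the
# associated false positive probability.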
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
(20, 100),
(100, 20),
(10, 20),
(5, 5),
(1000, 100)])
def test_kuiper_two_uniform(N, M):
with NumpyRNGContext(12345):
assert funcs.kuiper_two(np.random.random(N),
np.random.random(M))[1] > 0.01
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
(20, 100),
(100, 20),
(10, 20),
(5, 5),
(1000, 100)])
def test_kuiper_two_nonuniform(N, M):
with NumpyRNGContext(12345):
assert funcs.kuiper_two(np.random.random(N)**2,
np.random.random(M)**2)[1] > 0.01
@pytest.mark.skipif('not HAS_SCIPY')
def test_detect_kuiper_two_different():
with NumpyRNGContext(12345):
D, f = funcs.kuiper_two(np.random.random(500) * 0.5,
np.random.random(500))
assert f < 0.01
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
(20, 100),
(100, 20),
(10, 20),
(5, 5),
(1000, 100)])
def test_fpp_kuiper_two(N, M):
with NumpyRNGContext(12345):
R = 100
fpp = 0.05
fps = 0
for i in range(R):
D, f = funcs.kuiper_two(np.random.random(N), np.random.random(M))
if f < fpp:
fps += 1
assert scipy.stats.binom(R, fpp).sf(fps - 1) > 0.005
assert scipy.stats.binom(R, fpp).cdf(fps - 1) > 0.005
@pytest.mark.skipif('not HAS_SCIPY')
def test_histogram():
with NumpyRNGContext(1234):
a, b = 0.3, 3.14
s = np.random.uniform(a, b, 10000) % 1
b, w = funcs.fold_intervals([(a, b, 1. / (b - a))])
h = funcs.histogram_intervals(16, b, w)
nn, bb = np.histogram(s, bins=len(h), range=(0, 1))
uu = np.sqrt(nn)
nn, uu = len(h) * nn / h / len(s), len(h) * uu / h / len(s)
c2 = np.sum(((nn - 1) / uu)**2)
assert scipy.stats.chi2(len(h)).cdf(c2) > 0.01
assert scipy.stats.chi2(len(h)).sf(c2) > 0.01
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize("ii,rr", [
((4, (0, 1), (1,)), (1, 1, 1, 1)),
((2, (0, 1), (1,)), (1, 1)),
((4, (0, 0.5, 1), (1, 1)), (1, 1, 1, 1)),
((4, (0, 0.5, 1), (1, 2)), (1, 1, 2, 2)),
((3, (0, 0.5, 1), (1, 2)), (1, 1.5, 2)),
])
def test_histogram_intervals_known(ii, rr):
with NumpyRNGContext(1234):
assert_allclose(funcs.histogram_intervals(*ii), rr)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,m,p', [pytest.param(100, 10000, 0.01,
marks=pytest.mark.skip('Test too slow')),
pytest.param(300, 10000, 0.001,
marks=pytest.mark.skip('Test too slow')),
(10, 10000, 0.001),
(3, 10000, 0.001),
])
def test_uniform_binomial(N, m, p):
"""Check that the false positive probability is right
In particular, run m trials with N uniformly-distributed photons
and check that the number of false positives is consistent with
a binomial distribution. The more trials, the tighter the bounds
but the longer the runtime.
"""
with NumpyRNGContext(1234):
fpps = np.array([funcs.kuiper(np.random.random(N))[1]
for i in range(m)])
assert (fpps >= 0).all()
assert (fpps <= 1).all()
low = scipy.stats.binom(n=m, p=p).ppf(0.01)
high = scipy.stats.binom(n=m, p=p).ppf(0.99)
assert (low < sum(fpps < p) < high)
|
d7b0618f9a7054462a58070e3e5caef2e319dab97ef13d83211d671d841dd924 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal_nulp
from astropy.stats.biweight import (biweight_location, biweight_scale,
biweight_midvariance, biweight_midcovariance,
biweight_midcorrelation)
from astropy.tests.helper import catch_warnings
from astropy.utils.misc import NumpyRNGContext
def test_biweight_location():
with NumpyRNGContext(12345):
# test that it runs
randvar = np.random.randn(10000)
cbl = biweight_location(randvar)
assert abs(cbl - 0) < 1e-2
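# For reference (a sketch of the standard definition): the biweight location
# is M + sum((x - M) * w) / sum(w) with weights w = (1 - u**2)**2 for
# |u| < 1 (0 otherwise), u = (x - M) / (c * MAD), M the median of the data,
# and c a tuning constant (6.0 by default in astropy).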
def test_biweight_location_constant():
cbl = biweight_location(np.ones((10, 5)))
assert cbl == 1.
def test_biweight_location_constant_axis_2d():
shape = (10, 5)
data = np.ones(shape)
cbl = biweight_location(data, axis=0)
assert_allclose(cbl, np.ones(shape[1]))
cbl = biweight_location(data, axis=1)
assert_allclose(cbl, np.ones(shape[0]))
val1 = 100.
val2 = 2.
data = np.arange(50).reshape(10, 5)
data[2] = val1
data[7] = val2
cbl = biweight_location(data, axis=1)
assert_allclose(cbl[2], val1)
assert_allclose(cbl[7], val2)
def test_biweight_location_constant_axis_3d():
shape = (10, 5, 2)
data = np.ones(shape)
cbl = biweight_location(data, axis=0)
assert_allclose(cbl, np.ones((shape[1], shape[2])))
cbl = biweight_location(data, axis=1)
assert_allclose(cbl, np.ones((shape[0], shape[2])))
cbl = biweight_location(data, axis=2)
assert_allclose(cbl, np.ones((shape[0], shape[1])))
def test_biweight_location_small():
cbl = biweight_location([1, 3, 5, 500, 2])
assert abs(cbl - 2.745) < 1e-3
def test_biweight_location_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = np.random.normal(5, 2, (ny, nx))
bw = biweight_location(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_location(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_location(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_location_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = np.random.normal(5, 2, (nz, ny, nx))
bw = biweight_location(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_scale():
# NOTE: biweight_scale is covered by biweight_midvariance tests
data = [1, 3, 5, 500, 2]
scl = biweight_scale(data)
var = biweight_midvariance(data)
assert_allclose(scl, np.sqrt(var))
def test_biweight_midvariance():
with NumpyRNGContext(12345):
# test that it runs
randvar = np.random.randn(10000)
var = biweight_midvariance(randvar)
assert_allclose(var, 1.0, rtol=0.02)
def test_biweight_midvariance_small():
data = [1, 3, 5, 500, 2]
var = biweight_midvariance(data)
assert_allclose(var, 2.9238456) # verified with R
var = biweight_midvariance(data, modify_sample_size=True)
assert_allclose(var, 2.3390765)
def test_biweight_midvariance_5127():
# test a regression introduced in #5127
rand = np.random.RandomState(12345)
data = rand.normal(loc=0., scale=20., size=(100, 100))
var = biweight_midvariance(data)
assert_allclose(var, 406.86938710817344) # verified with R
def test_biweight_midvariance_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = np.random.normal(5, 2, (ny, nx))
bw = biweight_midvariance(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_midvariance(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_midvariance(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_midvariance_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = np.random.normal(5, 2, (nz, ny, nx))
bw = biweight_midvariance(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_midvariance_constant_axis():
bw = biweight_midvariance(np.ones((10, 5)))
assert bw == 0.0
def test_biweight_midvariance_constant_axis_2d():
shape = (10, 5)
data = np.ones(shape)
cbl = biweight_midvariance(data, axis=0)
assert_allclose(cbl, np.zeros(shape[1]))
cbl = biweight_midvariance(data, axis=1)
assert_allclose(cbl, np.zeros(shape[0]))
data = np.arange(50).reshape(10, 5)
data[2] = 100.
data[7] = 2.
bw = biweight_midvariance(data, axis=1)
assert_allclose(bw[2], 0.)
assert_allclose(bw[7], 0.)
def test_biweight_midvariance_constant_axis_3d():
shape = (10, 5, 2)
data = np.ones(shape)
cbl = biweight_midvariance(data, axis=0)
assert_allclose(cbl, np.zeros((shape[1], shape[2])))
cbl = biweight_midvariance(data, axis=1)
assert_allclose(cbl, np.zeros((shape[0], shape[2])))
cbl = biweight_midvariance(data, axis=2)
assert_allclose(cbl, np.zeros((shape[0], shape[1])))
def test_biweight_midcovariance_1d():
d = [0, 1, 2]
cov = biweight_midcovariance(d)
var = biweight_midvariance(d)
assert_allclose(cov, [[var]])
def test_biweight_midcovariance_2d():
d = [[0, 1, 2], [2, 1, 0]]
cov = biweight_midcovariance(d)
val = 0.70121809
assert_allclose(cov, [[val, -val], [-val, val]]) # verified with R
d = [[5, 1, 10], [500, 5, 2]]
cov = biweight_midcovariance(d)
assert_allclose(cov, [[14.54159077, -7.79026256], # verified with R
[-7.79026256, 6.92087252]])
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_allclose(cov, [[14.54159077, -5.19350838],
[-5.19350838, 4.61391501]])
def test_biweight_midcovariance_constant():
data = np.ones((3, 10))
cov = biweight_midcovariance(data)
assert_allclose(cov, np.zeros((3, 3)))
def test_biweight_midcovariance_midvariance():
"""
Test that biweight_midcovariance diagonal elements agree with
biweight_midvariance.
"""
rng = np.random.RandomState(1)
d = rng.normal(0, 2, size=(100, 3))
cov = biweight_midcovariance(d)
var = [biweight_midvariance(a) for a in d]
assert_allclose(cov.diagonal(), var)
cov2 = biweight_midcovariance(d, modify_sample_size=True)
var2 = [biweight_midvariance(a, modify_sample_size=True)
for a in d]
assert_allclose(cov2.diagonal(), var2)
def test_midcovariance_shape():
"""
Test that biweight_midcovariance raises error with a 3D array.
"""
d = np.ones(27).reshape(3, 3, 3)
with pytest.raises(ValueError) as e:
biweight_midcovariance(d)
assert 'The input array must be 2D or 1D.' in str(e.value)
def test_midcovariance_M_shape():
"""
Test that biweight_midcovariance raises error when M is not a scalar
or 1D array.
"""
d = [0, 1, 2]
M = [[0, 1], [2, 3]]
with pytest.raises(ValueError) as e:
biweight_midcovariance(d, M=M)
assert 'M must be a scalar or 1D array.' in str(e.value)
def test_biweight_midcovariance_symmetric():
"""
Regression test to ensure that midcovariance matrix is symmetric
when ``modify_sample_size=True`` (see #5972).
"""
rng = np.random.RandomState(1)
d = rng.gamma(2, 2, size=(3, 500))
cov = biweight_midcovariance(d)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
def test_biweight_midcorrelation():
x = [0, 1, 2]
y = [2, 1, 0]
assert_allclose(biweight_midcorrelation(x, x), 1.0)
assert_allclose(biweight_midcorrelation(x, y), -1.0)
x = [5, 1, 10, 12.4, 13.2]
y = [500, 5, 2, 7.1, 0.9]
# verified with R
assert_allclose(biweight_midcorrelation(x, y), -0.14411038976763313)
def test_biweight_midcorrelation_inputs():
a1 = np.ones((3, 3))
a2 = np.ones(5)
a3 = np.ones(7)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a1, a2)
assert 'x must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a1)
assert 'y must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a3)
assert 'x and y must have the same shape.' in str(e.value)
def test_biweight_32bit_runtime_warnings():
"""Regression test for #6905."""
with NumpyRNGContext(12345):
data = np.random.random(100).astype(np.float32)
data[50] = 30000.
with catch_warnings(RuntimeWarning) as warning_lines:
biweight_scale(data)
assert len(warning_lines) == 0
with catch_warnings(RuntimeWarning) as warning_lines:
biweight_midvariance(data)
assert len(warning_lines) == 0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.table.sorted_array import SortedArray
from astropy.table.table import Table
@pytest.fixture
def array():
# composite index
col0 = np.array([x % 2 for x in range(1, 11)])
    col1 = np.arange(1, 11)
t = Table([col0, col1])
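    # SortedArray expects its rows pre-sorted on the index columns, so sort
    # the table on the composite (col0, col1) key before building the index.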
t = t[t.argsort()]
return SortedArray(t, t['col1'].copy())
@pytest.fixture
def wide_array():
# array with 100 columns
t = Table([[x] * 10 for x in np.arange(100)])
return SortedArray(t, t['col0'].copy())
def test_array_find(array):
for i in range(1, 11):
print("Searching for {0}".format(i))
assert array.find((i % 2, i)) == [i]
assert array.find((1, 4)) == []
def test_array_range(array):
assert np.all(array.range((0, 8), (1, 3), (True, True)) == [8, 10, 1, 3])
assert np.all(array.range((0, 8), (1, 3), (False, True)) == [10, 1, 3])
assert np.all(array.range((0, 8), (1, 3), (True, False)) == [8, 10, 1])
def test_wide_array(wide_array):
# checks for a previous bug in which the length of a
# sliced SortedArray was set to the number of columns
# instead of the number of elements in each column
first_row = wide_array[:1].data
assert np.all(first_row == Table([[x] for x in np.arange(100)]))
import numpy as np
from astropy.table import np_utils
def test_common_dtype():
"""
Test that allowed combinations are those expected.
"""
dtype = [(str('int'), int),
(str('uint8'), np.uint8),
(str('float32'), np.float32),
(str('float64'), np.float64),
(str('str'), 'S2'),
(str('uni'), 'U2'),
(str('bool'), bool),
(str('object'), np.object_)]
arr = np.empty(1, dtype=dtype)
fail = set()
succeed = set()
for name1, type1 in dtype:
for name2, type2 in dtype:
try:
np_utils.common_dtype([arr[name1], arr[name2]])
succeed.add('{0} {1}'.format(name1, name2))
except np_utils.TableMergeError:
fail.add('{0} {1}'.format(name1, name2))
# known bad combinations
bad = set(['str int', 'str bool', 'uint8 bool', 'uint8 str', 'object float32',
'bool object', 'uni uint8', 'int str', 'bool str', 'bool float64',
'bool uni', 'str float32', 'uni float64', 'uni object', 'bool uint8',
'object float64', 'float32 bool', 'str uint8', 'uni bool', 'float64 bool',
'float64 object', 'int bool', 'uni int', 'uint8 object', 'int uni', 'uint8 uni',
'float32 uni', 'object uni', 'bool float32', 'uni float32', 'object str',
'int object', 'str float64', 'object int', 'float64 uni', 'bool int',
'object bool', 'object uint8', 'float32 object', 'str object', 'float64 str',
'float32 str'])
assert fail == bad
good = set(['float64 int', 'int int', 'uint8 float64', 'uint8 int', 'str uni',
'float32 float32', 'float64 float64', 'float64 uint8', 'float64 float32',
'int uint8', 'int float32', 'uni str', 'int float64', 'uint8 float32',
'float32 int', 'float32 uint8', 'bool bool', 'uint8 uint8', 'str str',
'float32 float64', 'object object', 'uni uni'])
assert succeed == good
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import sys
import copy
from io import StringIO
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io import fits
from astropy.tests.helper import (assert_follows_unicode_guidelines,
ignore_warnings, catch_warnings)
from astropy.utils.data import get_pkg_data_filename
from astropy import table
from astropy import units as u
from astropy.time import Time, TimeDelta
from .conftest import MaskedTable, MIXIN_COLS
try:
with ignore_warnings(DeprecationWarning):
# Ignore DeprecationWarning on pandas import in Python 3.5--see
# https://github.com/astropy/astropy/issues/4380
import pandas # pylint: disable=W0611
except ImportError:
HAS_PANDAS = False
else:
HAS_PANDAS = True
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, '_a'):
self._a = self._column_type(
[1, 2, 3], name='a', format='%d',
meta={'aa': [0, 1, 2, 3, 4]})
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(
[4, 5, 6], name='b', format='%d', meta={'aa': 1})
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type([7, 8, 9], 'c')
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type([7, 8, 7], 'd')
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, '_obj'):
self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O')
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures('table_types')
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t['a'][0] == 1
assert t['a'][1] == 20
assert t['a'][2] == 3
assert t['b'][0] == 4
assert t['b'][1] == 21
assert t['b'][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ('abc', 'def')
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t['aa'] == self.a)
assert t.colnames == ['aa']
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = np.array([1, 2, 3]) * u.m
assert np.all(t['aa'] == np.array([1, 2, 3]))
assert t['aa'].unit == u.m
t['bb'] = 3 * u.m
assert np.all(t['bb'] == 3)
assert t['bb'].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t['bb'] = self.b
assert np.all(t['bb'] == self.b)
assert t.colnames == ['a', 'bb']
assert t['bb'].meta == self.b.meta
assert t['bb'].format == self.b.format
# Add another column
t['c'] = t['a']
assert np.all(t['c'] == t['a'])
assert t.colnames == ['a', 'bb', 'c']
assert t['c'].meta == t['a'].meta
assert t['c'].format == t['a'].format
# Add a multi-dimensional column
t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t['d'].shape == (3, 2, 2)
assert t['d'][0, 0, 1] == 1
# Add column from a list
t['e'] = ['hello', 'the', 'world']
assert np.all(t['e'] == np.array(['hello', 'the', 'world']))
# Make sure setting existing column still works
t['e'] = ['world', 'hello', 'the']
assert np.all(t['e'] == np.array(['world', 'hello', 'the']))
# Add a column via broadcasting
t['f'] = 10
assert np.all(t['f'] == 10)
# Add a column from a Quantity
t['g'] = np.array([1, 2, 3]) * u.m
assert np.all(t['g'].data == np.array([1, 2, 3]))
assert t['g'].unit == u.m
# Add a column from a (scalar) Quantity
t['g'] = 3 * u.m
assert np.all(t['g'].data == 3)
assert t['g'].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name='b', data=[1, 2, 3]) # unmasked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t['b'] = [1, 2]
@pytest.mark.usefixtures('table_types')
class TestEmptyData():
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, length=100))
assert len(t['a']) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100))
assert len(t['a']) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int))
assert len(t['a']) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4)))
assert len(t['a']) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a')) # dtype is not specified
assert len(t['a']) == 0
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t['a'] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures('table_types')
class TestNewFromColumns():
def test_simple(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)]
t = table_types.Table(cols)
assert np.all(t['a'].data == np.array([1, 2, 3]))
assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t['b'][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64),
dtype=np.float64),
table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))]
t = table_types.Table(cols)
assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32))
assert type(t['a'][1]) is np.float64
assert type(t['b'][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6, 7])]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name='c')
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, 'd'))
assert t.colnames == ['c', 'd']
t = table_types.Table([c, d])
assert t.colnames == ['c', 'col1']
@pytest.mark.usefixtures('table_types')
class TestReverse():
def test_reverse(self, table_types):
t = table_types.Table([[1, 2, 3],
['a', 'b', 'cc']])
t.reverse()
assert np.all(t['col0'] == np.array([3, 2, 1]))
assert np.all(t['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=False)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=True)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2.sort('col0')
assert np.all(t2['col0'] == np.array([1, 2, 3]))
assert np.all(t2['col1'] == np.array(['a', 'b', 'cc']))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=('x', 'y'))
t.reverse()
assert np.all(t['x'] == x[::-1])
assert np.all(t['y'] == y[::-1])
@pytest.mark.usefixtures('table_types')
class TestColumnAccess():
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t['a']
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[1, 2, 3]))
assert np.all(t['a'] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t['b'] # column does not exist
def test_itercols(self, table_types):
names = ['a', 'b', 'c']
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures('table_types')
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short
@pytest.mark.usefixtures('table_types')
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column('b')
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.columns.keys() == ['a', 'b']
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a'))
assert t.columns.keys() == ['b', 'a']
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a') + 1)
assert t.columns.keys() == ['a', 'b']
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column('a') + 1)
t.add_column(self.c, t.index_column('b'))
assert t.columns.keys() == ['a', 'c', 'b']
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column('a')
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.columns.keys() == ['c', 'a', 'b']
@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name='b')
t.add_column(self.b, name='a')
assert t.columns.keys() == ['b', 'a']
# Check that we did not change the name of the input column
assert self.a.info.name == 'a'
assert self.b.info.name == 'b'
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t['a'], name='c')
assert t2.columns.keys() == ['c']
# Check that we did not change the name of the input column
assert t.columns.keys() == ['b', 'a']
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name='c')
assert t.columns.keys() == ['b', 'a', 'c']
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.columns.keys() == ['col0']
@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols['a'], cols['b'], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols['a'])
t2b.add_column(cols['b'])
t2b.add_column(self.c)
t['a'][1] = 20
t['b'][1] = 21
for t2 in [t2a, t2b]:
t2['a'][2] = 10
t2['b'][2] = 11
t2['c'][2] = 12
t2.columns['a'].meta['aa'][3] = 10
assert np.all(t['a'] == np.array([1, 20, 3]))
assert np.all(t['b'] == np.array([4, 21, 6]))
assert np.all(t2['a'] == np.array([1, 2, 10]))
assert np.all(t2['b'] == np.array([4, 5, 11]))
assert np.all(t2['c'] == np.array([7, 8, 12]))
assert t2['a'].name == 'a'
assert t2.columns['a'].meta['aa'][3] == 10
assert t.columns['a'].meta['aa'][3] == 3
@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ['a', 'b', 'c']
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ['a', 'b', 'c', 'd']
assert np.all(t['c'] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ['d', 'a', 'c', 'b']
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ['c', 'd', 'a', 'b']
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ['a', 'b', 'c', 'd']
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
assert t.colnames == ['b', 'c', 'a']
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ['col0', 'col1']
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ['a', 'a_1', 'b', 'c']
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1['a'])
t.add_column(t1['a'], rename_duplicate=True)
t1['a'][0] = 100 # Change original column
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
assert t1.colnames == ['a']
# Check new column didn't change (since name conflict forced a copy)
assert t['a_3'][0] == self.a[0]
# Check that rename_duplicate=True is ok if there are no duplicates
t.add_column(table_types.Column(name='q', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3', 'q']
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])])
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
table_types.Column(name='b', data=[0, 1, 2])],
rename_duplicate=True)
t.add_column(self.d)
assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type(name='c', data=['7', '8', '9'])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
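        # dtype '(2,)i' is a length-2 integer vector per row, 'S4' a 4-byte
        # string, and 'O' a generic Python object column.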
t.add_row()
assert np.all(t['a'][0] == [0, 0])
assert t['b'][0] == ''
assert t['c'][0] == 0
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['c'][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
t.add_row([[1, 2], 'hello', 'world'])
assert np.all(t['a'][0] == [1, 2])
assert t['b'][0] == 'hello'
assert t['obj'][0] == 'world'
# Make sure it is not repeating last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['obj'][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t['d'] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, '1'))
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, '10'])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
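        # Column 'c' holds single-character strings, so the added value '10'
        # is truncated to '1'.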
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({'a': 4, 'b': 7.2})
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
else:
assert np.all(t['c'] == np.array(['7', '8', '9', '']))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({'bad_column': 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
        # Wrong number of columns
        with pytest.raises(ValueError):
            t.add_row([1, 2, 3, 4])
        assert len(t) == 3
        assert np.all(t.as_array() == t_copy.as_array())
        # Wrong data type
        with pytest.raises(ValueError):
            t.add_row(['one', 2, 3])
        assert len(t) == 3
        assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
Light testing of Table.insert_row() method. The deep testing is done via
the add_row() tests which calls insert_row(index=len(self), ...), so
here just test that the added index parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, 'x', [10, 20])
for index in range(-3, 4):
indices = np.insert(np.arange(3), index, 3)
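            # ``indices`` is the expected row order after the insert: the new
            # row (labelled 3) spliced into [0, 1, 2] at position ``index``.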
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == 'f':
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures('table_types')
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns['a']
a[2] = 10
assert t['a'][2] == 10
@pytest.mark.usefixtures('table_types')
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2)
assert t['b'][0].shape == (2, )
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2, 4)
assert t['b'][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t['b'].shape == (3, 2, 4, 6)
assert t['b'][0].shape == (2, 4, 6)
@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, '_t2'):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns('a')
assert self.t.columns.keys() == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns('a')
assert self.t.columns.keys() == ['b']
assert self.t.dtype.names == ('b',)
assert np.all(self.t['b'] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t['new_column'] = self.t['a']
assert 'new_column' in self.t.columns.keys()
self.t.remove_columns('new_column')
assert 'new_column' not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['b'] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([(str('a'), 'int'),
(str('b'), 'int')])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t['a']
assert self.t.columns.keys() == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2['b']
assert self.t2.colnames == ['a', 'c']
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2['a', 'b']
assert self.t2.colnames == ['c']
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t['d']
@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.columns.keys() == []
assert t.as_array().size == 0
# Regression test for gh-8640
assert not t
assert isinstance(t == None, np.ndarray)
assert (t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns('b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column('a', 'b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column('a', 'c')
t.rename_column('b', 'a')
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
if t.masked:
assert t.mask.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t['a'].name = 'c'
t['b'].name = 'a'
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
t.rename_columns(('a', 'b', 'c'), ('aa', 'bb', 'cc'))
assert t.colnames == ['aa', 'bb', 'cc']
t.rename_columns(['bb', 'cc'], ['b', 'c'])
assert t.colnames == ['aa', 'b', 'c']
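        # ('aa') is just the string 'aa', not a tuple, so passing it as the
        # names argument must raise a TypeError.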
with pytest.raises(TypeError):
t.rename_columns(('aa'), ['a'])
with pytest.raises(ValueError):
t.rename_columns(['a'], ['b', 'c'])
@pytest.mark.usefixtures('table_types')
class TestSort():
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a')
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([5, 6, 4]))
assert np.all(t['c'] == np.array([[3, 4],
[1, 2],
[4, 5]]))
t.sort('b')
assert np.all(t['a'] == np.array([3, 1, 2]))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['c'] == np.array([[4, 5],
[3, 4],
[1, 2]]))
def test_single_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a', reverse=True)
assert np.all(t['a'] == np.array([3, 2, 1]))
assert np.all(t['b'] == np.array([4, 6, 5]))
assert np.all(t['c'] == np.array([[4, 5],
[1, 2],
[3, 4]]))
t.sort('b', reverse=True)
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
assert np.all(t['c'] == np.array([[1, 2],
[3, 4],
[4, 5]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=('x', 'y'))
t.sort('y')
idx = np.argsort(y)
assert np.all(t['x'] == x[idx])
assert np.all(t['y'] == y[idx])
@pytest.mark.parametrize('reverse', [True, False])
def test_empty_reverse(self, table_types, reverse):
t = table_types.Table([[], []], dtype=['f4', 'U1'])
t.sort('col1', reverse=reverse)
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'])
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(['b', 'a'])
assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(('a', 'b'))
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'], reverse=True)
assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
t.sort(['b', 'a'], reverse=True)
assert np.all(t['a'] == np.array([2, 3, 1, 3, 1, 2]))
assert np.all(t['b'] == np.array([6, 5, 5, 4, 4, 3]))
t.sort(('a', 'b'), reverse=True)
assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array(
[str(x) for x in ["John", "Jo", "Max"]])])
assert np.all([t['name'] == np.array(
[str(x) for x in ["Jackson", "Miller", "Miller"]])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort('a')
i1 = t.as_array().argsort(order=['a'])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'])
i1 = t.as_array().argsort(order=['a', 'b'])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
i0 = t.argsort('a', reverse=True)
i1 = np.array([4, 2, 3, 0, 5, 1])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'], reverse=True)
i1 = np.array([4, 2, 0, 3, 1, 5])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=('a',))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.add_row((2,))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.rename_column('a', 'b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.sort('b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.rename_column('b', 'c')
assert t.colnames == ['c']
assert t.dtype.names == ('c',)
@pytest.mark.usefixtures('table_types')
class TestIterator():
def test_iterator(self, table_types):
d = np.array([(2, 1),
(3, 6),
(4, 5)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')])
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures('table_types')
class TestSetMeta():
def test_set_meta(self, table_types):
d = table_types.Table(names=('a', 'b'))
d.meta['a'] = 1
d.meta['b'] = 1
d.meta['c'] = 1
d.meta['d'] = 1
assert list(d.meta.keys()) == ['a', 'b', 'c', 'd']
@pytest.mark.usefixtures('table_types')
class TestConvertNumpyArray():
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b'))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = ('>', '<')
native_order = byte_orders[sys.byteorder == 'little']
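        # Boolean indexing: True -> 1 -> '<' on little-endian machines,
        # False -> 0 -> '>' on big-endian ones.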
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8')
t = table_types.Table([col])
arr = t.as_array()
assert arr['a'].dtype.byteorder in (native_order, '=')
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr['a'].dtype.byteorder in (order, '=')
else:
assert arr['a'].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = ('>', '<')[sys.byteorder != 'little']
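        # Pick the byte order opposite to this machine's native order.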
filename = get_pkg_data_filename('data/tb.fits',
'astropy.io.fits.tests')
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert (data[colname].dtype.byteorder ==
arr2[colname].dtype.byteorder)
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
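    # A deep copy must not share memory with the original columns; a shallow
    # copy must.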
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True,
meta={'name': 'test'})
    t['x'].mask = [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_equality():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
def test_equality_masked():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask['a'][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
t = table.Table(t, masked=True)
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance('', bytes):
return
# Define unicode literals
string_a = 'астрономическая питона'
string_b = 'миллиарды световых лет'
a = table.Table(
[[string_a, 2],
[string_b, 3]],
names=('a', 'b'))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode('utf-8') in bytes(a)
def test_unicode_policy():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize('uni', ['питона', 'ascii'])
def test_unicode_bytestring_conversion(table_types, uni):
"""
    Test converting columns to all unicode or all bytestring. This
makes two columns, one which is unicode (str in Py3) and one which
is bytes (UTF-8 encoded). There are two code paths in the conversions,
a faster one where the data are actually ASCII and a slower one where
UTF-8 conversion is required. This tests both via the ``uni`` param.
"""
byt = uni.encode('utf-8')
t = table_types.Table([[byt], [uni], [1]], dtype=('S', 'U', 'i'))
assert t['col0'].dtype.kind == 'S'
assert t['col1'].dtype.kind == 'U'
assert t['col2'].dtype.kind == 'i'
t['col0'].description = 'col0'
t['col1'].description = 'col1'
t['col0'].meta['val'] = 'val0'
t['col1'].meta['val'] = 'val1'
# Unicode to bytestring
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1['col0'].dtype.kind == 'S'
assert t1['col1'].dtype.kind == 'S'
assert t1['col2'].dtype.kind == 'i'
# Meta made it through
assert t1['col0'].description == 'col0'
assert t1['col1'].description == 'col1'
assert t1['col0'].meta['val'] == 'val0'
assert t1['col1'].meta['val'] == 'val1'
# Need to de-fang the automatic unicode sandwiching of Table
assert np.array(t1['col0'])[0] == byt
assert np.array(t1['col1'])[0] == byt
assert np.array(t1['col2'])[0] == 1
# Bytestring to unicode
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1['col0'].dtype.kind == 'U'
assert t1['col1'].dtype.kind == 'U'
assert t1['col2'].dtype.kind == 'i'
# Meta made it through
assert t1['col0'].description == 'col0'
assert t1['col1'].description == 'col1'
assert t1['col0'].meta['val'] == 'val0'
assert t1['col1'].meta['val'] == 'val1'
# No need to de-fang the automatic unicode sandwiching of Table here, but
# do just for consistency to prove things are working.
assert np.array(t1['col0'])[0] == uni
assert np.array(t1['col1'])[0] == uni
assert np.array(t1['col2'])[0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({'a': [1, 2, 3]})
the_id = id(t)
assert t['a'].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=['a'])
out = []
for r1 in t:
for r2 in t:
out.append((r1['a'], r2['a']))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif('not HAS_PANDAS')
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ['<', '>']:
for kind in ['f', 'i']:
for byte in ['2', '4', '8']:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x
t['u'] = ['a', 'b', 'c']
t['s'] = ['a', 'b', 'c']
d = t.to_pandas()
for column in t.columns:
if column == 'u':
assert np.all(t['u'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == 's':
assert np.all(t['s'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.byteorder in ('=', '|'):
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
d[['<i4', '>i4']]
d[['<f4', '>f4']]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ('u', 's'):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.byteorder in ('=', '|'):
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
def test_2d(self):
t = table.Table()
t['a'] = [1, 2, 3]
t['b'] = np.ones((3, 2))
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert (exc.value.args[0] ==
"Cannot convert a table with multi-dimensional columns "
"to a pandas DataFrame. Offending columns are: ['b']")
def test_mixin_pandas(self):
t = table.QTable()
for name in sorted(MIXIN_COLS):
if name != 'ndarray':
t[name] = MIXIN_COLS[name]
t['dt'] = TimeDelta([0, 2, 4, 6], format='sec')
tp = t.to_pandas()
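        # Most mixin columns are flattened to plain data columns on the way
        # to pandas (e.g. the SkyCoord becomes 'skycoord.ra'/'skycoord.dec');
        # only Time and TimeDelta round-trip as their original classes.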
t2 = table.Table.from_pandas(tp)
assert np.allclose(t2['quantity'], [0, 1, 2, 3])
assert np.allclose(t2['longitude'], [0., 1., 5., 6.])
assert np.allclose(t2['latitude'], [5., 6., 10., 11.])
assert np.allclose(t2['skycoord.ra'], [0, 1, 2, 3])
assert np.allclose(t2['skycoord.dec'], [0, 1, 2, 3])
assert np.allclose(t2['arraywrap'], [0, 1, 2, 3])
assert np.allclose(t2['earthlocation.y'], [0, 110708, 547501, 654527], rtol=0, atol=1)
# For pandas, Time, TimeDelta are the mixins that round-trip the class
assert isinstance(t2['time'], Time)
assert np.allclose(t2['time'].jyear, [2000, 2001, 2002, 2003])
assert np.all(t2['time'].isot == ['2000-01-01T12:00:00.000',
'2000-12-31T18:00:00.000',
'2002-01-01T00:00:00.000',
'2003-01-01T06:00:00.000'])
assert t2['time'].format == 'isot'
# TimeDelta
assert isinstance(t2['dt'], TimeDelta)
assert np.allclose(t2['dt'].value, [0, 2, 4, 6])
assert t2['dt'].format == 'sec'
def test_to_pandas_index(self):
import pandas as pd
row_index = pd.RangeIndex(0, 2, 1)
tm_index = pd.DatetimeIndex(['1998-01-01', '2002-01-01'],
dtype='datetime64[ns]',
name='tm', freq=None)
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.QTable([tm, x], names=['tm', 'x'])
tp = t.to_pandas()
assert np.all(tp.index == row_index)
tp = t.to_pandas(index='tm')
assert np.all(tp.index == tm_index)
t.add_index('tm')
tp = t.to_pandas()
assert np.all(tp.index == tm_index)
# Make sure writing to pandas didn't hack the original table
assert t['tm'].info.indices
tp = t.to_pandas(index=True)
assert np.all(tp.index == tm_index)
tp = t.to_pandas(index=False)
assert np.all(tp.index == row_index)
with pytest.raises(ValueError) as err:
t.to_pandas(index='not a column')
assert 'index must be None, False' in str(err)
def test_mixin_pandas_masked(self):
tm = Time([1, 2, 3], format='cxcsec')
dt = TimeDelta([1, 2, 3], format='sec')
tm[1] = np.ma.masked
dt[1] = np.ma.masked
t = table.QTable([tm, dt], names=['tm', 'dt'])
tp = t.to_pandas()
assert np.all(tp['tm'].isnull() == [False, True, False])
assert np.all(tp['dt'].isnull() == [False, True, False])
t2 = table.Table.from_pandas(tp)
assert np.all(t2['tm'].mask == tm.mask)
assert np.ma.allclose(t2['tm'].jd, tm.jd, rtol=1e-14, atol=1e-14)
assert np.all(t2['dt'].mask == dt.mask)
assert np.ma.allclose(t2['dt'].jd, dt.jd, rtol=1e-14, atol=1e-14)
def test_from_pandas_index(self):
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.Table([tm, x], names=['tm', 'x'])
tp = t.to_pandas(index='tm')
t2 = table.Table.from_pandas(tp)
assert t2.colnames == ['x']
t2 = table.Table.from_pandas(tp, index=True)
assert t2.colnames == ['tm', 'x']
assert np.allclose(t2['tm'].jyear, tm.jyear)
def test_masking(self):
t = table.Table(masked=True)
t['a'] = [1, 2, 3]
t['a'].mask = [True, False, True]
t['b'] = [1., 2., 3.]
t['b'].mask = [False, False, True]
t['u'] = ['a', 'b', 'c']
t['u'].mask = [False, True, False]
t['s'] = ['a', 'b', 'c']
t['s'].mask = [False, True, False]
# https://github.com/astropy/astropy/issues/7741
t['Source'] = [2584290278794471936, 2584290038276303744,
2584288728310999296]
t['Source'].mask = [False, False, False]
d = t.to_pandas()
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
assert np.all(column.mask == t2[name].mask)
# Masked integer type comes back as float. Nothing we can do about this.
if column.dtype.kind == 'i':
if np.any(column.mask):
assert t2[name].dtype.kind == 'f'
else:
assert t2[name].dtype.kind == 'i'
assert_array_equal(column.data,
t2[name].data.astype(column.dtype))
else:
if column.dtype.byteorder in ('=', '|'):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
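def _example_masked_int_to_float():
    # Illustrative sketch (not part of the original suite): pandas (in
    # the versions targeted here) has no masked integer dtype, so a
    # masked int column necessarily comes back as float with NaN.
    t = table.Table(masked=True)
    t['i'] = [1, 2, 3]
    t['i'].mask = [False, True, False]
    assert t.to_pandas()['i'].dtype.kind == 'f'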
@pytest.mark.usefixtures('table_types')
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t.columns['a'] = [1, 2, 3]
with pytest.raises(ValueError):
t.replace_column('not there', [1, 2, 3])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t['a']
tb = t['b']
vals = [1.2, 3.4, 5.6]
for col in (vals,
table_types.Column(vals),
table_types.Column(vals, name='a'),
table_types.Column(vals, name='b')):
t.replace_column('a', col)
assert np.all(t['a'] == vals)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].meta == {}
assert t['a'].format is None
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index('a')
with pytest.raises(ValueError) as err:
t.replace_column('a', [1, 2, 3])
assert err.value.args[0] == 'cannot replace a table index column'
class Test__Astropy_Table__():
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9] * u.m]
self.names = ['a', 'b', 'c']
self.meta = OrderedDict([('a', 1), ('b', 2)])
def __astropy_table__(self, cls, copy, **kwargs):
a, b, c = self.columns
c.info.name = 'c'
cols = [table.Column(a, name='a'),
table.MaskedColumn(b, name='b'),
c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.MaskedColumn
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta='extra!')
assert t.colnames == ['a', 'b', 'c']
assert t.meta == {'extra_meta': 'extra!'}
assert np.all(t['a'] == st.columns[0])
assert np.all(t['b'] == st.columns[1])
vals = t['c'].value if table_cls is table.QTable else t['c']
assert np.all(st.columns[2].value == vals)
assert isinstance(t['a'], table.MaskedColumn)
assert isinstance(t['b'], table.MaskedColumn)
assert isinstance(t['c'], col_c_class)
assert t['c'].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t['a'][0] = 10
assert st.columns[0][0] == (1 if cpy else 10)  # parenthesize: conditional binds looser than ==
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ['a', 'b', 'c']
meta = OrderedDict([('c', 3)])
t = table.Table(st, dtype=dtypes, names=names, meta=meta)
assert t.colnames == names
assert all(col.dtype.type is dtype
for col, dtype in zip(t.columns.values(), dtypes))
# The supplied meta overrides the existing meta. Changed in astropy 3.2.
assert t.meta != st.meta
assert t.meta == meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta='extra!')
assert '__init__() got unexpected keyword argument' in str(err)
def test_table_meta_copy():
"""
Test no copy vs light (key) copy vs deep copy of table meta for different
situations. #8404.
"""
t = table.Table([[1]])
meta = {1: [1, 2]}
# Assigning meta directly implies using direct object reference
t.meta = meta
assert t.meta is meta
# Table slice implies key copy, so values are unchanged
t2 = t[:]
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1]  # Value IS the same list object
# Table init with copy=False implies key copy
t2 = table.Table(t, copy=False)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the same list object
# Table init with copy=True implies deep copy
t2 = table.Table(t, copy=True)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object
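def _example_meta_copy_levels():
    # Illustrative sketch (not part of the original suite) of the three
    # levels shown above: direct reference, key (shallow) copy, deepcopy.
    t = table.Table([[1]])
    t.meta = {1: [1, 2]}
    assert t[:].meta[1] is t.meta[1]                 # slice: key copy
    assert table.Table(t).meta[1] is not t.meta[1]   # copy=True: deepcopy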
def test_table_meta_copy_with_meta_arg():
"""
Test no copy vs light (key) copy vs deep copy of table meta when meta is
supplied as a table init argument. #8404.
"""
meta = {1: [1, 2]}
meta2 = {2: [3, 4]}
t = table.Table([[1]], meta=meta, copy=False)
assert t.meta is meta
t = table.Table([[1]], meta=meta) # default copy=True
assert t.meta is not meta
assert t.meta == meta
# Test initializing from existing table with meta with copy=False
t2 = table.Table(t, meta=meta2, copy=False)
assert t2.meta is meta2
assert t2.meta != t.meta # Change behavior in #8404
# Test initializing from existing table with meta with default copy=True
t2 = table.Table(t, meta=meta2)
assert t2.meta is not meta2
assert t2.meta != t.meta # Change behavior in #8404
# Table init with copy=True and empty dict meta gets that empty dict
t2 = table.Table(t, copy=True, meta={})
assert t2.meta == {}
# Table init with copy=True and kwarg meta=None gets the original table dict.
# This is a somewhat ambiguous case because it could be interpreted as the
# user wanting NO meta set on the output. This could be implemented by inspecting
# call args.
t2 = table.Table(t, copy=True, meta=None)
assert t2.meta == t.meta
# Test initializing empty table with meta with copy=False
t = table.Table(meta=meta, copy=False)
assert t.meta is meta
assert t.meta[1] is meta[1]
# Test initializing empty table with meta with default copy=True (deepcopy meta)
t = table.Table(meta=meta)
assert t.meta is not meta
assert t.meta == meta
assert t.meta[1] is not meta[1]
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=['a', 'b'])
ta = t['a']
tb = t['b']
ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
ta.info.format = '%f'
t.replace_column('a', a.to('cm'))
assert np.all(t['a'] == ta)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].info.meta is None
assert t['a'].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=['a', 'b'])
assert isinstance(t['a'], u.Quantity)
# Inplace update
ta = t['a']
t['a'] = 5 * u.m
assert np.all(t['a'] == [5, 5] * u.m)
assert t['a'] is ta
# Replace
t['a'] = [5, 6]
assert np.all(t['a'] == [5, 6])
assert isinstance(t['a'], table.Column)
assert t['a'] is not ta
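def _example_setitem_semantics():
    # Illustrative sketch (not part of the original suite): assigning a
    # scalar broadcasts into the existing column in place, while
    # assigning an array-like replaces the column outright (possibly
    # changing its class).
    t = table.QTable([[1, 2] * u.m], names=['a'])
    col = t['a']
    t['a'] = 5 * u.m           # in-place broadcast update
    assert t['a'] is col
    t['a'] = [5, 6]            # replacement with a plain Column
    assert t['a'] is not col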
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
t['a'] = [10, 20, 30] # replace column
assert len(w) == 0
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t2 = t[:2]
t2['a'] = 0 # in-place slice update
assert np.all(t['a'] == [0, 0, 3])
assert len(w) == 0
t2['a'] = [10, 20] # replace slice
assert len(w) == 1
assert "replaced column 'a' which looks like an array slice" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a'] # Generate an extra reference to original column
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and the number of references" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['always']):
t['a'] = 0 # in-place slice update
assert len(w) == 0
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe())
t['a'] = [10, 20, 30] # replace column
assert len(w) == 1
assert "replaced column 'a'" == str(w[0].message)
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.TableReplaceWarning
assert 'test_table' in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_inplace', True):
with table.conf.set_temp('replace_warnings',
['always', 'refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
assert ta is t['a']
t['a'] = [10, 20, 30] # normally replaces column, but not now
assert len(w) == 0
assert ta is t['a']
assert np.all(t['a'] == [10, 20, 30])
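def _example_replace_config():
    # Illustrative sketch (not part of the original suite):
    # ``replace_warnings`` selects which replacement conditions warn,
    # while ``replace_inplace`` forces the old in-place update behavior
    # so no replacement happens at all.
    t = table.Table([[1, 2, 3]], names=['a'])
    with table.conf.set_temp('replace_inplace', True):
        col = t['a']
        t['a'] = [10, 20, 30]    # updated in place, not replaced
        assert t['a'] is col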
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
t.add_index('a')
original_key = t.primary_key
# can't test if tuples are equal, so just check content
assert original_key[0] == 'a'
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# test whether the reference is the same in the following
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
'''Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails.'''
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
def test_create_table_from_final_row():
"""Regression test for issue #8422: passing the last row of a table into
Table should return a new table containing that row."""
t1 = table.Table([(1, 2)], names=['col'])
row = t1[-1]
t2 = table.Table(row)['col']
assert t2[0] == 2
def test_key_values_in_as_array():
# Test column selection using the names argument in Table.as_array()
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
# Creating a table with three columns
t1 = table.Table(rows=data_rows, names=('a', 'b', 'c'),
meta={'name': 'first table'},
dtype=('i4', 'f8', 'S1'))
# Values of sliced columns a, b are stored in a numpy array
a = np.array([(1, 2.), (4, 5.), (5, 8.2)],
dtype=[('a', '<i4'), ('b', '<f8')])
# Values of sliced column c are stored in a numpy array
b = np.array([(b'x',), (b'y',), (b'z',)], dtype=[('c', 'S1')])
# Comparing initialised array with sliced array using Table.as_array()
assert np.array_equal(a, t1.as_array(names=['a', 'b']))
assert np.array_equal(b, t1.as_array(names=['c']))
|
0178ab3e600963a02dfdbc402524ae4b0174e6c71495b0547c138e0909d1bab3 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from io import StringIO
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import pytest
from astropy import units as u
from astropy import time
from astropy import coordinates
from astropy import table
from astropy.table.info import serialize_method_as
from astropy.utils.data_info import data_info_factory, dtype_info_name
from astropy.table.table_helpers import simple_table
def test_table_info_attributes(table_types):
"""
Test the info() method for printing a summary of table column attributes
"""
a = np.array([1, 2, 3], dtype='int32')
b = np.array([1, 2, 3], dtype='float32')
c = np.array(['a', 'c', 'e'], dtype='|S1')
t = table_types.Table([a, b, c], names=['a', 'b', 'c'])
# Minimal output for a typical table
tinfo = t.info(out=None)
subcls = ['class'] if table_types.Table.__name__ == 'MyTable' else []
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format',
'description', 'class', 'n_bad', 'length']
assert np.all(tinfo['name'] == ['a', 'b', 'c'])
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1')])
if subcls:
assert np.all(tinfo['class'] == ['MyColumn'] * 3)
# All output fields including a mixin column
t['d'] = [1, 2, 3] * u.m
t['d'].description = 'quantity'
t['a'].format = '%02d'
t['e'] = time.Time([1, 2, 3], format='mjd')
t['e'].info.description = 'time'
t['f'] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit='deg')
t['f'].info.description = 'skycoord'
tinfo = t.info(out=None)
assert np.all(tinfo['name'] == 'a b c d e f'.split())
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'float64',
'object', 'object'])
assert np.all(tinfo['unit'] == ['', '', '', 'm', '', 'deg,deg'])
assert np.all(tinfo['format'] == ['%02d', '', '', '', '', ''])
assert np.all(tinfo['description'] == ['', '', '', 'quantity', 'time', 'skycoord'])
cls = t.ColumnClass.__name__
assert np.all(tinfo['class'] == [cls, cls, cls, cls, 'Time', 'SkyCoord'])
# Test that repr(t.info) is same as t.info()
out = StringIO()
t.info(out=out)
assert repr(t.info) == out.getvalue()
def test_table_info_stats(table_types):
"""
Test the info() method for printing a summary of table column statistics
"""
a = np.array([1, 2, 1, 2], dtype='int32')
b = np.array([1, 2, 1, 2], dtype='float32')
c = np.array(['a', 'c', 'e', 'f'], dtype='|S1')
d = time.Time([1, 2, 1, 2], format='mjd')
t = table_types.Table([a, b, c, d], names=['a', 'b', 'c', 'd'])
# option = 'stats'
masked = 'masked=True ' if t.masked else ''
out = StringIO()
t.info('stats', out=out)
table_header_line = '<{0} {1}length=4>'.format(t.__class__.__name__, masked)
exp = [table_header_line,
'name mean std min max',
'---- ---- --- --- ---',
' a 1.5 0.5 1 2',
' b 1.5 0.5 1.0 2.0',
' c -- -- -- --',
' d -- -- 1.0 2.0']
assert out.getvalue().splitlines() == exp
# option = ['attributes', 'stats']
tinfo = t.info(['attributes', 'stats'], out=None)
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',
'class', 'mean', 'std', 'min', 'max', 'n_bad', 'length']
assert np.all(tinfo['mean'] == ['1.5', '1.5', '--', '--'])
assert np.all(tinfo['std'] == ['0.5', '0.5', '--', '--'])
assert np.all(tinfo['min'] == ['1', '1.0', '--', '1.0'])
assert np.all(tinfo['max'] == ['2', '2.0', '--', '2.0'])
out = StringIO()
t.info('stats', out=out)
exp = [table_header_line,
'name mean std min max',
'---- ---- --- --- ---',
' a 1.5 0.5 1 2',
' b 1.5 0.5 1.0 2.0',
' c -- -- -- --',
' d -- -- 1.0 2.0']
assert out.getvalue().splitlines() == exp
# option = ['attributes', custom]
custom = data_info_factory(names=['sum', 'first'],
funcs=[np.sum, lambda col: col[0]])
out = StringIO()
tinfo = t.info(['attributes', custom], out=None)
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',
'class', 'sum', 'first', 'n_bad', 'length']
assert np.all(tinfo['name'] == ['a', 'b', 'c', 'd'])
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'object'])
assert np.all(tinfo['sum'] == ['6', '6.0', '--', '--'])
assert np.all(tinfo['first'] == ['1', '1.0', 'a', '1.0'])
def test_data_info():
"""
Test getting info for just a column.
"""
cols = [table.Column([1.0, 2.0, np.nan], name='name',
description='description', unit='m/s'),
table.MaskedColumn([1.0, 2.0, 3.0], name='name',
description='description', unit='m/s',
mask=[False, False, True])]
for c in cols:
# Test getting the full ordered dict
cinfo = c.info(out=None)
assert cinfo == OrderedDict([('name', 'name'),
('dtype', 'float64'),
('shape', ''),
('unit', 'm / s'),
('format', ''),
('description', 'description'),
('class', type(c).__name__),
('n_bad', 1),
('length', 3)])
# Test the console (string) version which omits trivial values
out = StringIO()
c.info(out=out)
exp = ['name = name',
'dtype = float64',
'unit = m / s',
'description = description',
'class = {0}'.format(type(c).__name__),
'n_bad = 1',
'length = 3']
assert out.getvalue().splitlines() == exp
# repr(c.info) gives the same as c.info()
assert repr(c.info) == out.getvalue()
# Test stats info
cinfo = c.info('stats', out=None)
assert cinfo == OrderedDict([('name', 'name'),
('mean', '1.5'),
('std', '0.5'),
('min', '1.0'),
('max', '2.0'),
('n_bad', 1),
('length', 3)])
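def _example_column_info():
    # Illustrative sketch (not part of the original suite): with
    # out=None, ``info`` returns an OrderedDict instead of printing,
    # and the 'stats' option swaps attribute fields for statistics.
    c = table.Column([1.0, 2.0], name='x', unit='m')
    assert c.info(out=None)['unit'] == 'm'
    assert c.info('stats', out=None)['mean'] == '1.5'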
def test_data_info_subclass():
class Column(table.Column):
"""
Confusingly named Column on purpose, but that is legal.
"""
pass
for data in ([], [1, 2]):
c = Column(data, dtype='int64')
cinfo = c.info(out=None)
assert cinfo == OrderedDict([('dtype', 'int64'),
('shape', ''),
('unit', ''),
('format', ''),
('description', ''),
('class', 'Column'),
('n_bad', 0),
('length', len(data))])
def test_scalar_info():
"""
Make sure info works with scalar values
"""
c = time.Time('2000:001')
cinfo = c.info(out=None)
assert cinfo['n_bad'] == 0
assert 'length' not in cinfo
def test_empty_table():
t = table.Table()
out = StringIO()
t.info(out=out)
exp = ['<Table length=0>', '<No columns>']
assert out.getvalue().splitlines() == exp
def test_class_attribute():
"""
Test that class info column is suppressed only for identical non-mixin
columns.
"""
vals = [[1] * u.m, [2] * u.m]
texp = ['<Table length=1>',
'name dtype unit',
'---- ------- ----',
'col0 float64 m',
'col1 float64 m']
qexp = ['<QTable length=1>',
'name dtype unit class ',
'---- ------- ---- --------',
'col0 float64 m Quantity',
'col1 float64 m Quantity']
for table_cls, exp in ((table.Table, texp),
(table.QTable, qexp)):
t = table_cls(vals)
out = StringIO()
t.info(out=out)
assert out.getvalue().splitlines() == exp
def test_ignore_warnings():
t = table.Table([[np.nan, np.nan]])
with warnings.catch_warnings(record=True) as warns:
t.info('stats', out=None)
assert len(warns) == 0
def test_no_deprecation_warning():
# regression test for #5459, where numpy deprecation warnings were
# emitted unnecessarily.
t = simple_table()
with warnings.catch_warnings(record=True) as warns:
t.info()
assert len(warns) == 0
def test_lost_parent_error():
c = table.Column([1, 2, 3], name='a')
with pytest.raises(AttributeError) as err:
c[:].info.name
assert 'failed access "info" attribute' in str(err)
def test_info_serialize_method():
"""
Unit test of context manager to set info.serialize_method. Normally just
used to set this for writing a Table to file (FITS, ECSV, HDF5).
"""
t = table.Table({'tm': time.Time([1, 2], format='cxcsec'),
'sc': coordinates.SkyCoord([1, 2], [1, 2], unit='deg'),
'mc': table.MaskedColumn([1, 2], mask=[True, False]),
'mc2': table.MaskedColumn([1, 2], mask=[True, False])}
)
origs = {}
for name in ('tm', 'mc', 'mc2'):
origs[name] = deepcopy(t[name].info.serialize_method)
# Test setting by name and getting back to originals
with serialize_method_as(t, {'tm': 'test_tm', 'mc': 'test_mc'}):
for name in ('tm', 'mc'):
assert all(t[name].info.serialize_method[key] == 'test_' + name
for key in t[name].info.serialize_method)
assert t['mc2'].info.serialize_method == origs['mc2']
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
# Test setting by name and class, where name takes precedence. Also
# test that it works for subclasses.
with serialize_method_as(t, {'tm': 'test_tm', 'mc': 'test_mc',
table.Column: 'test_mc2'}):
for name in ('tm', 'mc', 'mc2'):
assert all(t[name].info.serialize_method[key] == 'test_' + name
for key in t[name].info.serialize_method)
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
# Test supplying a single string that applies to all columns with
# a serialize_method.
with serialize_method_as(t, 'test'):
for name in ('tm', 'mc', 'mc2'):
assert all(t[name].info.serialize_method[key] == 'test'
for key in t[name].info.serialize_method)
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
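def _example_serialize_method_as():
    # Illustrative sketch (not part of the original suite): the context
    # manager sets info.serialize_method for matching columns and
    # restores the originals on exit, even on error (see the exception
    # test below).
    t = table.Table({'mc': table.MaskedColumn([1, 2], mask=[True, False])})
    orig = deepcopy(t['mc'].info.serialize_method)
    with serialize_method_as(t, 'null_value'):
        assert all(t['mc'].info.serialize_method[key] == 'null_value'
                   for key in t['mc'].info.serialize_method)
    assert t['mc'].info.serialize_method == orig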
def test_info_serialize_method_exception():
"""
Unit test of context manager to set info.serialize_method. Normally just
used to set this for writing a Table to file (FITS, ECSV, HDF5).
"""
t = simple_table(masked=True)
origs = deepcopy(t['a'].info.serialize_method)
try:
with serialize_method_as(t, 'test'):
assert all(t['a'].info.serialize_method[key] == 'test'
for key in t['a'].info.serialize_method)
raise ZeroDivisionError()
except ZeroDivisionError:
pass
assert t['a'].info.serialize_method == origs # dict compare
|
2d5441e4d91b203ad36c07f714e1737aba7b46f754559bc546e929b96a1f6fd2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from .test_table import SetupData
from astropy.table.bst import BST, FastRBT, FastBST
from astropy.table.sorted_array import SortedArray
from astropy.table.soco import SCEngine, HAS_SOCO
from astropy.table.table import QTable, Row, Table
from astropy import units as u
from astropy.time import Time
from astropy.table.column import BaseColumn
from astropy.table.index import get_index, SlicedIndex
try:
import bintrees
except ImportError:
HAS_BINTREES = False
else:
HAS_BINTREES = True
if HAS_BINTREES:
available_engines = [BST, FastBST, FastRBT, SortedArray]
else:
available_engines = [BST, SortedArray]
if HAS_SOCO:
available_engines.append(SCEngine)
@pytest.fixture(params=available_engines)
def engine(request):
return request.param
_col = [1, 2, 3, 4, 5]
@pytest.fixture(params=[
_col,
u.Quantity(_col),
Time(_col, format='jyear'),
])
def main_col(request):
return request.param
def assert_col_equal(col, array):
if isinstance(col, Time):
assert np.all(col == Time(array, format='jyear'))
else:
assert np.all(col == col.__class__(array))
@pytest.mark.usefixtures('table_types')
class TestIndex(SetupData):
def _setup(self, main_col, table_types):
super()._setup(table_types)
self.main_col = main_col
if isinstance(main_col, u.Quantity):
self._table_type = QTable
if not isinstance(main_col, list):
self._column_type = lambda x: x # don't change mixin type
self.mutable = isinstance(main_col, (list, u.Quantity))
def make_col(self, name, lst):
return self._column_type(lst, name=name)
def make_val(self, val):
if isinstance(self.main_col, Time):
return Time(val, format='jyear')
return val
@property
def t(self):
if not hasattr(self, '_t'):
self._t = self._table_type()
self._t['a'] = self._column_type(self.main_col)
self._t['b'] = self._column_type([4.0, 5.1, 6.2, 7.0, 1.1])
self._t['c'] = self._column_type(['7', '8', '9', '10', '11'])
return self._t
@pytest.mark.parametrize("composite", [False, True])
def test_table_index(self, main_col, table_types, composite, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index(('a', 'b') if composite else 'a', engine=engine)
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
if not self.mutable:
return
# test altering table columns
t['a'][0] = 4
t.add_row((6, 6.0, '7'))
t['a'][3] = 10
t.remove_row(2)
t.add_row((4, 5.0, '9'))
assert_col_equal(t['a'], np.array([4, 2, 10, 5, 6, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 7.0, 1.1, 6.0, 5.0]))
assert np.all(t['c'].data == np.array(['7', '8', '10', '11', '7', '9']))
index = t.indices[0]
lst = list(index.data.items())
if composite:
assert np.all(lst == [((2, 5.1), [1]),
((4, 4.0), [0]),
((4, 5.0), [5]),
((5, 1.1), [3]),
((6, 6.0), [4]),
((10, 7.0), [2])])
else:
assert np.all(lst == [((2,), [1]),
((4,), [0, 5]),
((5,), [3]),
((6,), [4]),
((10,), [2])])
t.remove_indices('a')
assert len(t.indices) == 0
def test_table_slicing(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
for slice_ in ([0, 2], np.array([0, 2])):
t2 = t[slice_]
# t2 should retain an index on column 'a'
assert len(t2.indices) == 1
assert_col_equal(t2['a'], [1, 3])
# the index in t2 should reorder row numbers after slicing
assert np.all(t2.indices[0].sorted_data() == [0, 1])
# however, this index should be a deep copy of t1's index
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
def test_remove_rows(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index('a', engine=engine)
# remove individual row
t2 = t.copy()
t2.remove_rows(2)
assert_col_equal(t2['a'], [1, 2, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3])
# remove by list, ndarray, or slice
for cut in ([0, 2, 4], np.array([0, 2, 4]), slice(0, 5, 2)):
t2 = t.copy()
t2.remove_rows(cut)
assert_col_equal(t2['a'], [2, 4])
assert np.all(t2.indices[0].sorted_data() == [0, 1])
with pytest.raises(ValueError):
t.remove_rows((0, 2, 4))
def test_col_get_slice(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
# get slice
t2 = t[1:3] # table slice
assert_col_equal(t2['a'], [2, 3])
assert np.all(t2.indices[0].sorted_data() == [0, 1])
col_slice = t['a'][1:3]
assert_col_equal(col_slice, [2, 3])
# true column slices discard indices
if isinstance(t['a'], BaseColumn):
assert len(col_slice.info.indices) == 0
# take slice of slice
t2 = t[::2]
assert_col_equal(t2['a'], np.array([1, 3, 5]))
t3 = t2[::-1]
assert_col_equal(t3['a'], np.array([5, 3, 1]))
assert np.all(t3.indices[0].sorted_data() == [2, 1, 0])
t3 = t2[:2]
assert_col_equal(t3['a'], np.array([1, 3]))
assert np.all(t3.indices[0].sorted_data() == [0, 1])
# out-of-bound slices
for t_empty in (t2[3:], t2[2:1], t3[2:]):
assert len(t_empty['a']) == 0
assert np.all(t_empty.indices[0].sorted_data() == [])
if self.mutable:
# get boolean mask
mask = t['a'] % 2 == 1
t2 = t[mask]
assert_col_equal(t2['a'], [1, 3, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2])
def test_col_set_slice(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index('a', engine=engine)
# set slice
t2 = t.copy()
t2['a'][1:3] = np.array([6, 7])
assert_col_equal(t2['a'], np.array([1, 6, 7, 4, 5]))
assert np.all(t2.indices[0].sorted_data() == [0, 3, 4, 1, 2])
# change original table via slice reference
t2 = t.copy()
t3 = t2[1:3]
assert_col_equal(t3['a'], np.array([2, 3]))
assert np.all(t3.indices[0].sorted_data() == [0, 1])
t3['a'][0] = 5
assert_col_equal(t3['a'], np.array([5, 3]))
assert_col_equal(t2['a'], np.array([1, 5, 3, 4, 5]))
assert np.all(t3.indices[0].sorted_data() == [1, 0])
assert np.all(t2.indices[0].sorted_data() == [0, 2, 3, 1, 4])
# set boolean mask
t2 = t.copy()
mask = t['a'] % 2 == 1
t2['a'][mask] = 0.
assert_col_equal(t2['a'], [0, 2, 0, 4, 0])
assert np.all(t2.indices[0].sorted_data() == [0, 2, 4, 1, 3])
def test_multiple_slices(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index('a', engine=engine)
for i in range(6, 51):
t.add_row((i, 1.0, 'A'))
assert_col_equal(t['a'], [i for i in range(1, 51)])
assert np.all(t.indices[0].sorted_data() == [i for i in range(50)])
evens = t[::2]
assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)])
reverse = evens[::-1]
index = reverse.indices[0]
assert (index.start, index.stop, index.step) == (48, -2, -2)
assert np.all(index.sorted_data() == [i for i in range(24, -1, -1)])
# modify slice of slice
reverse[-10:] = 0
expected = np.array([i for i in range(1, 51)])
expected[:20][expected[:20] % 2 == 1] = 0
assert_col_equal(t['a'], expected)
assert_col_equal(evens['a'], expected[::2])
assert_col_equal(reverse['a'], expected[::2][::-1])
# first ten evens are now zero
assert np.all(t.indices[0].sorted_data() ==
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
+ [i for i in range(20, 50)])
assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)])
assert np.all(reverse.indices[0].sorted_data() ==
[i for i in range(24, -1, -1)])
# try different step sizes of slice
t2 = t[1:20:2]
assert_col_equal(t2['a'], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
assert np.all(t2.indices[0].sorted_data() == [i for i in range(10)])
t3 = t2[::3]
assert_col_equal(t3['a'], [2, 8, 14, 20])
assert np.all(t3.indices[0].sorted_data() == [0, 1, 2, 3])
t4 = t3[2::-1]
assert_col_equal(t4['a'], [14, 8, 2])
assert np.all(t4.indices[0].sorted_data() == [2, 1, 0])
def test_sort(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t[::-1] # reverse table
assert_col_equal(t['a'], [5, 4, 3, 2, 1])
t.add_index('a', engine=engine)
assert np.all(t.indices[0].sorted_data() == [4, 3, 2, 1, 0])
if not self.mutable:
return
# sort table by column a
t2 = t.copy()
t2.sort('a')
assert_col_equal(t2['a'], [1, 2, 3, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
# sort table by primary key
t2 = t.copy()
t2.sort()
assert_col_equal(t2['a'], [1, 2, 3, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
def test_insert_row(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index('a', engine=engine)
t.insert_row(2, (6, 1.0, '12'))
assert_col_equal(t['a'], [1, 2, 6, 3, 4, 5])
assert np.all(t.indices[0].sorted_data() == [0, 1, 3, 4, 5, 2])
t.insert_row(1, (0, 4.0, '13'))
assert_col_equal(t['a'], [1, 0, 2, 6, 3, 4, 5])
assert np.all(t.indices[0].sorted_data() == [1, 0, 2, 4, 5, 6, 3])
def test_index_modes(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
# first, no special mode
assert len(t[[1, 3]].indices) == 1
assert len(t[::-1].indices) == 1
assert len(self._table_type(t).indices) == 1
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t2 = t.copy()
# non-copy mode
with t.index_mode('discard_on_copy'):
assert len(t[[1, 3]].indices) == 0
assert len(t[::-1].indices) == 0
assert len(self._table_type(t).indices) == 0
assert len(t2.copy().indices) == 1 # mode should only affect t
# make sure non-copy mode is exited correctly
assert len(t[[1, 3]].indices) == 1
if not self.mutable:
return
# non-modify mode
with t.index_mode('freeze'):
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t['a'][0] = 6
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t.add_row((2, 1.5, '12'))
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t.remove_rows([1, 3])
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
assert_col_equal(t['a'], [6, 3, 5, 2])
# mode should only affect t
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t2['a'][0] = 6
assert np.all(t2.indices[0].sorted_data() == [1, 2, 3, 4, 0])
# make sure non-modify mode is exited correctly
assert np.all(t.indices[0].sorted_data() == [3, 1, 2, 0])
if isinstance(t['a'], BaseColumn):
assert len(t['a'][::-1].info.indices) == 0
with t.index_mode('copy_on_getitem'):
assert len(t['a'][[1, 2]].info.indices) == 1
# mode should only affect t
assert len(t2['a'][[1, 2]].info.indices) == 0
assert len(t['a'][::-1].info.indices) == 0
assert len(t2['a'][::-1].info.indices) == 0
def test_index_retrieval(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
t.add_index(['a', 'c'], engine=engine)
assert len(t.indices) == 2
assert len(t.indices['a'].columns) == 1
assert len(t.indices['a', 'c'].columns) == 2
with pytest.raises(IndexError):
t.indices['b']
def test_col_rename(self, main_col, table_types, engine):
'''
Checks for a previous bug in which copying a Table
with different column names raised an exception.
'''
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
t2 = self._table_type(self.t, names=['d', 'e', 'f'])
assert len(t2.indices) == 1
def test_table_loc(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
t.add_index('b', engine=engine)
t2 = t.loc[self.make_val(3)] # single label, with primary key 'a'
assert_col_equal(t2['a'], [3])
assert isinstance(t2, Row)
# list search
t2 = t.loc[[self.make_val(1), self.make_val(4), self.make_val(2)]]
assert_col_equal(t2['a'], [1, 4, 2]) # same order as input list
if not isinstance(main_col, Time):
# ndarray search
t2 = t.loc[np.array([1, 4, 2])]
assert_col_equal(t2['a'], [1, 4, 2])
assert_col_equal(t2['a'], [1, 4, 2])
t2 = t.loc[self.make_val(3): self.make_val(5)] # range search
assert_col_equal(t2['a'], [3, 4, 5])
t2 = t.loc['b', 5.0:7.0]
assert_col_equal(t2['b'], [5.1, 6.2, 7.0])
# search by sorted index
t2 = t.iloc[0:2] # two smallest rows by column 'a'
assert_col_equal(t2['a'], [1, 2])
t2 = t.iloc['b', 2:] # exclude two smallest rows in column 'b'
assert_col_equal(t2['b'], [5.1, 6.2, 7.0])
for t2 in (t.loc[:], t.iloc[:]):
assert_col_equal(t2['a'], [1, 2, 3, 4, 5])
def test_table_loc_indices(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
t.add_index('b', engine=engine)
t2 = t.loc_indices[self.make_val(3)] # single label, with primary key 'a'
assert t2 == 2
# list search
t2 = t.loc_indices[[self.make_val(1), self.make_val(4), self.make_val(2)]]
for i, p in zip(t2, [1, 4, 2]):  # same order as input list
assert i == p-1
def test_invalid_search(self, main_col, table_types, engine):
# using .loc and .loc_indices with a value not present should raise an exception
self._setup(main_col, table_types)
t = self.t
t.add_index('a')
with pytest.raises(KeyError):
t.loc[self.make_val(6)]
with pytest.raises(KeyError):
t.loc_indices[self.make_val(6)]
def test_copy_index_references(self, main_col, table_types, engine):
# check against a bug in which indices were given an incorrect
# column reference when copied
self._setup(main_col, table_types)
t = self.t
t.add_index('a')
t.add_index('b')
t2 = t.copy()
assert t2.indices['a'].columns[0] is t2['a']
assert t2.indices['b'].columns[0] is t2['b']
def test_unique_index(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine, unique=True)
assert np.all(t.indices['a'].sorted_data() == [0, 1, 2, 3, 4])
if self.mutable:
with pytest.raises(ValueError):
t.add_row((5, 5.0, '9'))
def test_copy_indexed_table(self, table_types):
self._setup(_col, table_types)
t = self.t
t.add_index('a')
t.add_index(['a', 'b'])
for tp in (self._table_type(t), t.copy()):
assert len(t.indices) == len(tp.indices)
for index, indexp in zip(t.indices, tp.indices):
assert np.all(index.data.data == indexp.data.data)
assert index.data.data.colnames == indexp.data.data.colnames
def test_updating_row_byindex(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = Table([['a', 'b', 'c', 'd'], [2, 3, 4, 5], [3, 4, 5, 6]], names=('a', 'b', 'c'), meta={'name': 'first table'})
t.add_index('a', engine=engine)
t.add_index('b', engine=engine)
t.loc['c'] = ['g', 40, 50] # single label, with primary key 'a'
t2 = t[2]
assert list(t2) == ['g', 40, 50]
# list search
t.loc[['a', 'd', 'b']] = [['a', 20, 30], ['d', 50, 60], ['b', 30, 40]]
t2 = [['a', 20, 30], ['d', 50, 60], ['b', 30, 40]]
for i, p in zip(t2, [1, 4, 2]): # same order as input list
assert list(t[p-1]) == i
def test_invalid_updates(self, main_col, table_types, engine):
# updating via .loc with a value of the wrong length or shape should raise an exception
self._setup(main_col, table_types)
t = Table([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]], names=('a', 'b', 'c'), meta={'name': 'first table'})
t.add_index('a')
with pytest.raises(ValueError):
t.loc[3] = [[1, 2, 3]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6], [2, 3]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5], [2, 3]]
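def _example_loc_vs_iloc():
    # Illustrative sketch (not part of the original suite): ``loc`` keys
    # on index values (the primary key by default), while ``iloc``
    # selects by position within the sorted index.
    t = Table({'a': [3, 1, 2]})
    t.add_index('a')
    assert t.loc[2]['a'] == 2          # lookup by value returns a Row
    assert t.iloc[0:1]['a'][0] == 1    # smallest value sorts first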
def test_get_index():
a = [1, 4, 5, 2, 7, 4, 45]
b = [2.0, 5.0, 8.2, 3.7, 4.3, 6.5, 3.3]
t = Table([a, b], names=('a', 'b'), meta={'name': 'first table'})
t.add_index(['a'])
# Getting the values of index using names
x1 = get_index(t, names=['a'])
assert isinstance(x1, SlicedIndex)
assert len(x1.columns) == 1
assert len(x1.columns[0]) == 7
assert x1.columns[0].info.name == 'a'
# Getting the values of index using table_copy
x2 = get_index(t, table_copy=t[['a']])
assert isinstance(x2, SlicedIndex)
assert len(x2.columns) == 1
assert len(x2.columns[0]) == 7
assert x2.columns[0].info.name == 'a'
with pytest.raises(ValueError):
get_index(t, names=['a'], table_copy=t[['a']])
with pytest.raises(ValueError):
get_index(t, names=None, table_copy=None)
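def _example_slice_writes_through():
    # Illustrative sketch (not part of the original suite): a table
    # slice is a view, so writing through it updates the parent column
    # while the indices on both tables stay consistent.
    t = Table({'a': [1, 2, 3]})
    t.add_index('a')
    view = t[1:3]
    view['a'][0] = 5                   # writes through to the parent
    assert t['a'][1] == 5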
|
7b9d9dffa6aa09f7522e20b0f6fbbd127a2c56df2bc917c3aaaad6ec1260c8a2 | from os.path import abspath, dirname, join
import textwrap
import pytest
from astropy.table.table import Table
from astropy import extern
try:
import bleach # noqa
HAS_BLEACH = True
except ImportError:
HAS_BLEACH = False
try:
import IPython # pylint: disable=W0611
except ImportError:
HAS_IPYTHON = False
else:
HAS_IPYTHON = True
EXTERN_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data'))
REFERENCE = """
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
<style>
body {font-family: sans-serif;}
table.dataTable {width: auto !important; margin: 0 !important;}
.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em}
</style>
<link href="%(datatables_css_url)s" rel="stylesheet" type="text/css"/>
<script src="%(jquery_url)s">
</script>
<script src="%(datatables_js_url)s">
</script>
</head>
<body>
<script>
var astropy_sort_num = function(a, b) {
var a_num = parseFloat(a);
var b_num = parseFloat(b);
if (isNaN(a_num) && isNaN(b_num))
return ((a < b) ? -1 : ((a > b) ? 1 : 0));
else if (!isNaN(a_num) && !isNaN(b_num))
return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0));
else
return isNaN(a_num) ? -1 : 1;
}
jQuery.extend( jQuery.fn.dataTableExt.oSort, {
"optionalnum-asc": astropy_sort_num,
"optionalnum-desc": function (a,b) { return -astropy_sort_num(a, b); }
});
$(document).ready(function() {
$('#%(table_id)s').dataTable({
order: [],
pageLength: %(length)s,
lengthMenu: [[%(display_length)s, -1], [%(display_length)s, 'All']],
pagingType: "full_numbers",
columnDefs: [{targets: [0], type: "optionalnum"}]
});
} ); </script>
<table class="%(table_class)s" id="%(table_id)s">
<thead>
<tr>
<th>a</th>
<th>b</th>
</tr>
</thead>
%(lines)s
</table>
</body>
</html>
"""
TPL = (' <tr>\n'
' <td>{0}</td>\n'
' <td>{1}</td>\n'
' </tr>')
def format_lines(col1, col2):
return '\n'.join(TPL.format(a, b) for a, b in zip(col1, col2))
def test_write_jsviewer_default(tmpdir):
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['a', 'b', 'c', 'd', 'e']
t['a'].unit = 'm'
tmpfile = tmpdir.join('test.html').strpath
t.write(tmpfile, format='jsviewer')
ref = REFERENCE % dict(
lines=format_lines(t['a'], t['b']),
table_class='display compact',
table_id='table%s' % id(t),
length='50',
display_length='10, 25, 50, 100, 500, 1000',
datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',
datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
jquery_url='https://code.jquery.com/jquery-3.1.1.min.js'
)
with open(tmpfile) as f:
assert f.read().strip() == ref.strip()
@pytest.mark.skipif('not HAS_BLEACH')
def test_write_jsviewer_options(tmpdir):
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['<b>a</b>', 'b', 'c', 'd', 'e']
t['a'].unit = 'm'
tmpfile = tmpdir.join('test.html').strpath
t.write(tmpfile, format='jsviewer', table_id='test', max_lines=3,
jskwargs={'display_length': 5}, table_class='display hover',
htmldict=dict(raw_html_cols='b'))
ref = REFERENCE % dict(
lines=format_lines(t['a'][:3], t['b'][:3]),
table_class='display hover',
table_id='test',
length='5',
display_length='5, 10, 25, 50, 100, 500, 1000',
datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',
datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
jquery_url='https://code.jquery.com/jquery-3.1.1.min.js'
)
with open(tmpfile) as f:
assert f.read().strip() == ref.strip()
def test_write_jsviewer_local(tmpdir):
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['a', 'b', 'c', 'd', 'e']
t['a'].unit = 'm'
tmpfile = tmpdir.join('test.html').strpath
t.write(tmpfile, format='jsviewer', table_id='test',
jskwargs={'use_local_files': True})
ref = REFERENCE % dict(
lines=format_lines(t['a'], t['b']),
table_class='display compact',
table_id='test',
length='50',
display_length='10, 25, 50, 100, 500, 1000',
datatables_css_url='file://' + join(EXTERN_DIR, 'css', 'jquery.dataTables.css'),
datatables_js_url='file://' + join(EXTERN_DIR, 'js', 'jquery.dataTables.min.js'),
jquery_url='file://' + join(EXTERN_DIR, 'js', 'jquery-3.1.1.min.js')
)
with open(tmpfile) as f:
assert f.read().strip() == ref.strip()
@pytest.mark.skipif('not HAS_IPYTHON')
def test_show_in_notebook():
t = Table()
t['a'] = [1, 2, 3, 4, 5]
t['b'] = ['b', 'c', 'a', 'd', 'e']
htmlstr_windx = t.show_in_notebook().data # should default to 'idx'
htmlstr_windx_named = t.show_in_notebook(show_row_index='realidx').data
htmlstr_woindx = t.show_in_notebook(show_row_index=False).data
assert (textwrap.dedent("""
<thead><tr><th>idx</th><th>a</th><th>b</th></tr></thead>
<tr><td>0</td><td>1</td><td>b</td></tr>
<tr><td>1</td><td>2</td><td>c</td></tr>
<tr><td>2</td><td>3</td><td>a</td></tr>
<tr><td>3</td><td>4</td><td>d</td></tr>
<tr><td>4</td><td>5</td><td>e</td></tr>
""").strip() in htmlstr_windx)
assert '<thead><tr><th>realidx</th><th>a</th><th>b</th></tr></thead>' in htmlstr_windx_named
assert '<thead><tr><th>a</th><th>b</th></tr></thead>' in htmlstr_woindx
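def _example_write_jsviewer():
    # Illustrative sketch (not part of the original suite): the minimal
    # jsviewer write that the reference-HTML tests above verify in
    # detail; 'demo.html' is just a placeholder output path.
    t = Table({'a': [1, 2], 'b': ['x', 'y']})
    t.write('demo.html', format='jsviewer', table_id='demo',
            jskwargs={'display_length': 5})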
|
7f5e44153a9f883f6ce72de98ec312b071f8bdd50fa92bbec28c7ff2cdc8ade9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict, UserDict
from collections.abc import Mapping
import pytest
import numpy as np
from astropy.table import Column, TableColumns
class TestTableColumnsInit():
def test_init(self):
"""Test initialisation with lists, tuples, dicts of arrays
rather than Columns [regression test for #2647]"""
x1 = np.arange(10.)
x2 = np.arange(5.)
x3 = np.arange(7.)
col_list = [('x1', x1), ('x2', x2), ('x3', x3)]
tc_list = TableColumns(col_list)
for col in col_list:
assert col[0] in tc_list
assert tc_list[col[0]] is col[1]
col_tuple = (('x1', x1), ('x2', x2), ('x3', x3))
tc_tuple = TableColumns(col_tuple)
for col in col_tuple:
assert col[0] in tc_tuple
assert tc_tuple[col[0]] is col[1]
col_dict = dict([('x1', x1), ('x2', x2), ('x3', x3)])
tc_dict = TableColumns(col_dict)
for col in tc_dict.keys():
assert col in tc_dict
assert tc_dict[col] is col_dict[col]
columns = [Column(col[1], name=col[0]) for col in col_list]
tc = TableColumns(columns)
for col in columns:
assert col.name in tc
assert tc[col.name] is col
# pytest.mark.usefixtures('table_type')
class BaseInitFrom():
def _setup(self, table_type):
pass
def test_basic_init(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=('a', 'b', 'c'))
assert t.colnames == ['a', 'b', 'c']
assert np.all(t['a'] == np.array([1, 3]))
assert np.all(t['b'] == np.array([2, 4]))
assert np.all(t['c'] == np.array([3, 5]))
assert all(t[name].name == name for name in t.colnames)
def test_set_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=('a', 'b', 'c'), dtype=('i4', 'f4', 'f8'))
assert t.colnames == ['a', 'b', 'c']
assert np.all(t['a'] == np.array([1, 3], dtype='i4'))
assert np.all(t['b'] == np.array([2, 4], dtype='f4'))
assert np.all(t['c'] == np.array([3, 5], dtype='f8'))
assert t['a'].dtype.type == np.int32
assert t['b'].dtype.type == np.float32
assert t['c'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_names_dtype_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=('a',), dtype=('i4', 'f4', 'i4'))
def test_names_cols_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=('a',), dtype=('i4'))
@pytest.mark.usefixtures('table_type')
class BaseInitFromListLike(BaseInitFrom):
def test_names_cols_mismatch(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=['a'], dtype=[int])
def test_names_copy_false(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type(self.data, names=['a'], dtype=[int], copy=False)
@pytest.mark.usefixtures('table_type')
class BaseInitFromDictLike(BaseInitFrom):
pass
@pytest.mark.usefixtures('table_type')
class TestInitFromNdarrayHomo(BaseInitFromListLike):
def setup_method(self, method):
self.data = np.array([(1, 2, 3),
(3, 4, 5)],
dtype='i4')
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ['col0', 'col1', 'col2']
def test_ndarray_ref(self, table_type):
"""Init with ndarray and copy=False and show that this is a reference
to input ndarray"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t['col1'][1] = 0
assert t.as_array()['col1'][1] == 0
assert t['col1'][1] == 0
assert self.data[1][1] == 0
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['a', None, 'c'], dtype=[None, None, 'f8'])
assert t.colnames == ['a', 'col1', 'c']
assert t['a'].dtype.type == np.int32
assert t['col1'].dtype.type == np.int32
assert t['c'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['a', None, 'c'])
assert t.colnames == ['a', 'col1', 'c']
assert t['a'].dtype.type == np.int32
assert t['col1'].dtype.type == np.int32
assert t['c'].dtype.type == np.int32
assert all(t[name].name == name for name in t.colnames)
@pytest.mark.usefixtures('table_type')
class TestInitFromListOfLists(BaseInitFromListLike):
def setup_method(self, method):
self.data = [(np.int32(1), np.int32(3)),
Column(name='col1', data=[2, 4], dtype=np.int32),
np.array([3, 5], dtype=np.int32)]
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ['col0', 'col1', 'col2']
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['b', None, 'c'],
dtype=['f4', None, 'f8'])
assert t.colnames == ['b', 'col1', 'c']
assert t['b'].dtype.type == np.float32
assert t['col1'].dtype.type == np.int32
assert t['c'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_bad_data(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type([[1, 2],
[3, 4, 5]])
@pytest.mark.usefixtures('table_type')
class TestInitFromListOfDicts(BaseInitFromListLike):
def _setup(self, table_type):
self.data = [{'a': 1, 'b': 2, 'c': 3},
{'a': 3, 'b': 4, 'c': 5}]
def test_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert all(colname in set(['a', 'b', 'c']) for colname in t.colnames)
def test_names_ordered(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=('c', 'b', 'a'))
assert t.colnames == ['c', 'b', 'a']
def test_bad_data(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type([{'a': 1, 'b': 2, 'c': 3},
{'a': 2, 'b': 4}])
@pytest.mark.usefixtures('table_type')
class TestInitFromColsList(BaseInitFromListLike):
def _setup(self, table_type):
self.data = [Column([1, 3], name='x', dtype=np.int32),
np.array([2, 4], dtype=np.int32),
np.array([3, 5], dtype='i8')]
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ['x', 'col1', 'col2']
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['b', None, 'c'], dtype=['f4', None, 'f8'])
assert t.colnames == ['b', 'col1', 'c']
assert t['b'].dtype.type == np.float32
assert t['col1'].dtype.type == np.int32
assert t['c'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_ref(self, table_type):
"""Test that initializing from a list of columns can be done by reference"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t['x'][0] = 100
assert self.data[0][0] == 100
@pytest.mark.usefixtures('table_type')
class TestInitFromNdarrayStruct(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = np.array([(1, 2, 3),
(3, 4, 5)],
dtype=[(str('x'), 'i8'), (str('y'), 'i4'), (str('z'), 'i8')])
def test_ndarray_ref(self, table_type):
"""Init with ndarray and copy=False and show that table uses reference
to input ndarray"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t['x'][1] = 0 # Column-wise assignment
t[0]['y'] = 0 # Row-wise assignment
assert self.data['x'][1] == 0
assert self.data['y'][0] == 0
assert np.all(np.array(t) == self.data)
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'f8'])
assert t.colnames == ['e', 'y', 'd']
assert t['e'].dtype.type == np.float32
assert t['y'].dtype.type == np.int32
assert t['d'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['e', None, 'd'], copy=False)
assert t.colnames == ['e', 'y', 'd']
assert t['e'].dtype.type == np.int64
assert t['y'].dtype.type == np.int32
assert t['d'].dtype.type == np.int64
assert all(t[name].name == name for name in t.colnames)
@pytest.mark.usefixtures('table_type')
class TestInitFromDict(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = dict([('a', Column([1, 3], name='x')),
('b', [2, 4]),
('c', np.array([3, 5], dtype='i8'))])
@pytest.mark.usefixtures('table_type')
class TestInitFromMapping(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = UserDict([('a', Column([1, 3], name='x')),
('b', [2, 4]),
('c', np.array([3, 5], dtype='i8'))])
assert isinstance(self.data, Mapping)
assert not isinstance(self.data, dict)
@pytest.mark.usefixtures('table_type')
class TestInitFromOrderedDict(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = OrderedDict([('a', Column(name='x', data=[1, 3])),
('b', [2, 4]),
('c', np.array([3, 5], dtype='i8'))])
def test_col_order(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ['a', 'b', 'c']
@pytest.mark.usefixtures('table_type')
class TestInitFromRow(BaseInitFromDictLike):
def _setup(self, table_type):
arr = np.array([(1, 2, 3),
(3, 4, 5)],
dtype=[(str('x'), 'i8'), (str('y'), 'i8'), (str('z'), 'f8')])
self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']})
def test_init_from_row(self, table_type):
self._setup(table_type)
t = table_type(self.data[0])
# Values and meta match original
assert t.meta['comments'][0] == 'comment1'
for name in t.colnames:
assert np.all(t[name] == self.data[name][0:1])
assert all(t[name].name == name for name in t.colnames)
# Change value in new instance and check that original is the same
t['x'][0] = 8
t.meta['comments'][1] = 'new comment2'
assert np.all(t['x'] == np.array([8]))
assert np.all(self.data['x'] == np.array([1, 3]))
assert self.data.meta['comments'][1] == 'comment2'
@pytest.mark.usefixtures('table_type')
class TestInitFromTable(BaseInitFromDictLike):
def _setup(self, table_type):
arr = np.array([(1, 2, 3),
(3, 4, 5)],
dtype=[(str('x'), 'i8'), (str('y'), 'i8'), (str('z'), 'f8')])
self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']})
def test_data_meta_copy(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.meta['comments'][0] == 'comment1'
t['x'][1] = 8
t.meta['comments'][1] = 'new comment2'
assert self.data.meta['comments'][1] == 'comment2'
assert np.all(t['x'] == np.array([1, 8]))
assert np.all(self.data['x'] == np.array([1, 3]))
assert t['z'].name == 'z'
assert all(t[name].name == name for name in t.colnames)
def test_table_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, copy=False)
t['x'][1] = 0
assert t['x'][1] == 0
assert self.data['x'][1] == 0
assert np.all(t.as_array() == self.data.as_array())
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'i8'])
assert t.colnames == ['e', 'y', 'd']
assert t['e'].dtype.type == np.float32
assert t['y'].dtype.type == np.int64
assert t['d'].dtype.type == np.int64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=['e', None, 'd'], copy=False)
assert t.colnames == ['e', 'y', 'd']
assert t['e'].dtype.type == np.int64
assert t['y'].dtype.type == np.int64
assert t['d'].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_init_from_columns(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type(t.columns['z', 'x', 'y'])
assert t2.colnames == ['z', 'x', 'y']
assert t2.dtype.names == ('z', 'x', 'y')
def test_init_from_columns_slice(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type(t.columns[0:2])
assert t2.colnames == ['x', 'y']
assert t2.dtype.names == ('x', 'y')
def test_init_from_columns_mix(self, table_type):
self._setup(table_type)
t = table_type(self.data)
t2 = table_type([t.columns[0], t.columns['z']])
assert t2.colnames == ['x', 'z']
assert t2.dtype.names == ('x', 'z')
@pytest.mark.usefixtures('table_type')
class TestInitFromNone():
# Note test_table.TestEmptyData tests initializing a completely empty
# table and adding data.
def test_data_none_with_cols(self, table_type):
"""
Test different ways of initializing an empty table
"""
np_t = np.empty(0, dtype=[(str('a'), 'f4', (2,)),
(str('b'), 'i4')])
for kwargs in ({'names': ('a', 'b')},
{'names': ('a', 'b'), 'dtype': (('f4', (2,)), 'i4')},
{'dtype': [(str('a'), 'f4', (2,)), (str('b'), 'i4')]},
{'dtype': np_t.dtype}):
t = table_type(**kwargs)
assert t.colnames == ['a', 'b']
assert len(t['a']) == 0
assert len(t['b']) == 0
if 'dtype' in kwargs:
assert t['a'].dtype.type == np.float32
assert t['b'].dtype.type == np.int32
assert t['a'].shape[1:] == (2,)
@pytest.mark.usefixtures('table_types')
class TestInitFromRows():
def test_init_with_rows(self, table_type):
for rows in ([[1, 'a'], [2, 'b']],
[(1, 'a'), (2, 'b')],
((1, 'a'), (2, 'b'))):
t = table_type(rows=rows, names=('a', 'b'))
assert np.all(t['a'] == [1, 2])
assert np.all(t['b'] == ['a', 'b'])
assert t.colnames == ['a', 'b']
assert t['a'].dtype.kind == 'i'
assert t['b'].dtype.kind in ('S', 'U')
# Regression test for
# https://github.com/astropy/astropy/issues/3052
assert t['b'].dtype.str.endswith('1')
rows = np.arange(6).reshape(2, 3)
t = table_type(rows=rows, names=('a', 'b', 'c'), dtype=['f8', 'f4', 'i8'])
assert np.all(t['a'] == [0, 3])
assert np.all(t['b'] == [1, 4])
assert np.all(t['c'] == [2, 5])
assert t.colnames == ['a', 'b', 'c']
assert t['a'].dtype.str.endswith('f8')
assert t['b'].dtype.str.endswith('f4')
assert t['c'].dtype.str.endswith('i8')
def test_init_with_rows_and_data(self, table_type):
with pytest.raises(ValueError) as err:
table_type(data=[[1]], rows=[[1]])
assert "Cannot supply both `data` and `rows` values" in str(err)
@pytest.mark.usefixtures('table_type')
def test_init_and_ref_from_multidim_ndarray(table_type):
"""
Test that initializing from an ndarray structured array with
a multi-dim column works for both copy=False and True and that
the referencing is as expected.
"""
for copy in (False, True):
nd = np.array([(1, [10, 20]),
(3, [30, 40])],
dtype=[(str('a'), 'i8'), (str('b'), 'i8', (2,))])
t = table_type(nd, copy=copy)
assert t.colnames == ['a', 'b']
assert t['a'].shape == (2,)
assert t['b'].shape == (2, 2)
t['a'][0] = -200
t['b'][1][1] = -100
if copy:
assert nd[str('a')][0] == 1
assert nd[str('b')][1][1] == 40
else:
assert nd[str('a')][0] == -200
assert nd[str('b')][1][1] == -100
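def _example_ref_from_ndarray():
    # Illustrative sketch, not a test: with copy=False the Table wraps the
    # ndarray buffer, so in-place edits are visible on both sides.
    import numpy as np
    from astropy.table import Table
    nd = np.array([(1, 2.0)], dtype=[('a', 'i8'), ('b', 'f8')])
    t = Table(nd, copy=False)
    t['a'][0] = 99
    assert nd['a'][0] == 99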
@pytest.mark.usefixtures('table_type')
@pytest.mark.parametrize('copy', [False, True])
def test_init_and_ref_from_dict(table_type, copy):
"""
Test that initializing from a dict works for both copy=False and True and that
the referencing is as expected.
"""
x1 = np.arange(10.)
x2 = np.zeros(10)
col_dict = dict([('x1', x1), ('x2', x2)])
t = table_type(col_dict, copy=copy)
assert set(t.colnames) == set(['x1', 'x2'])
assert t['x1'].shape == (10,)
assert t['x2'].shape == (10,)
t['x1'][0] = -200
t['x2'][1] = -100
if copy:
assert x1[0] == 0.
assert x2[1] == 0.
else:
assert x1[0] == -200
assert x2[1] == -100
@pytest.mark.usefixtures('table_type')
def test_init_from_row_OrderedDict(table_type):
row1 = OrderedDict([('b', 1), ('a', 0)])
row2 = {'a': 10, 'b': 20}
rows12 = [row1, row2]
row3 = dict([('b', 1), ('a', 0)])
row4 = dict([('b', 11), ('a', 10)])
rows34 = [row3, row4]
t1 = table_type(rows=rows12)
t2 = table_type(rows=rows34)
assert t1.colnames == ['b', 'a']
assert t2.colnames == ['a', 'b']
with pytest.raises(ValueError):
table_type(rows=[OrderedDict([('b', 1)]), {'a': 10, 'b': 20}])
|
c913a1455de4331f3ecc444860d838a56c1aeb83b51832301f0c097f4d2f7f8f | import os
import re
from astropy.table.scripts import showtable
from astropy.utils.compat import NUMPY_LT_1_14
ROOT = os.path.abspath(os.path.dirname(__file__))
ASCII_ROOT = os.path.join(ROOT, '..', '..', 'io', 'ascii', 'tests')
FITS_ROOT = os.path.join(ROOT, '..', '..', 'io', 'fits', 'tests')
VOTABLE_ROOT = os.path.join(ROOT, '..', '..', 'io', 'votable', 'tests')
def test_missing_file(capsys):
showtable.main(['foobar.fits'])
out, err = capsys.readouterr()
assert err.startswith("ERROR: [Errno 2] No such file or directory: "
"'foobar.fits'")
def test_info(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--info'])
out, err = capsys.readouterr()
assert out.splitlines() == ['<Table length=3>',
' name dtype ',
'------ -------',
'target bytes20',
' V_mag float32']
def test_stats(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--stats'])
out, err = capsys.readouterr()
if NUMPY_LT_1_14:
expected = ['<Table length=3>',
' name mean std min max ',
'------ ------- ------- ---- ----',
'target -- -- -- --',
' V_mag 12.8667 1.72111 11.1 15.2']
else:
expected = ['<Table length=3>',
' name mean std min max ',
'------ --------- --------- ---- ----',
'target -- -- -- --',
' V_mag 12.86666[0-9]? 1.7211105 11.1 15.2']
out = out.splitlines()
assert out[:4] == expected[:4]
# Here we use re.match as in some cases one of the values above is
# platform-dependent.
assert re.match(expected[4], out[4]) is not None
def test_fits(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits')])
out, err = capsys.readouterr()
assert out.splitlines() == [' target V_mag',
'------- -----',
'NGC1001 11.1',
'NGC1002 12.3',
'NGC1003 15.2']
def test_fits_hdu(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/zerowidth.fits'),
'--hdu', 'AIPS OF'])
out, err = capsys.readouterr()
if NUMPY_LT_1_14:
assert out.startswith(
' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n'
' DAYS \n'
'-------- --------- ----------- -------- ------- -------- --------\n'
'0.144387 1 10 1 1 4 4\n')
else:
assert out.startswith(
' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n'
' DAYS \n'
'---------- --------- ----------- -------- ------- -------- --------\n'
'0.14438657 1 10 1 1 4 4\n')
def test_csv(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/simple_csv.csv')])
out, err = capsys.readouterr()
assert out.splitlines() == [' a b c ',
'--- --- ---',
' 1 2 3',
' 4 5 6']
def test_ascii_format(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/commented_header.dat'),
'--format', 'ascii.commented_header'])
out, err = capsys.readouterr()
assert out.splitlines() == [' a b c ',
'--- --- ---',
' 1 2 3',
' 4 5 6']
def test_ascii_delimiter(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/simple2.txt'),
'--format', 'ascii', '--delimiter', '|'])
out, err = capsys.readouterr()
assert out.splitlines() == [
"obsid redshift X Y object rad ",
"----- -------- ---- ---- ----------- ----",
" 3102 0.32 4167 4085 Q1250+568-A 9.0",
" 3102 0.32 4706 3916 Q1250+568-B 14.0",
" 877 0.22 4378 3892 'Source 82' 12.5",
]
def test_votable(capsys):
showtable.main([os.path.join(VOTABLE_ROOT, 'data/regression.xml'),
'--table-id', 'main_table', '--max-width', '50'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' string_test string_test_2 ... bitarray2 [16]',
'----------------- ------------- ... --------------',
' String & test Fixed stri ... True .. False',
'String & test 0123456789 ... -- .. --',
' XXXX XXXX ... -- .. --',
' ... -- .. --',
' ... -- .. --',
]
def test_max_lines(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/cds2.dat'),
'--format', 'ascii.cds', '--max-lines', '7',
'--max-width', '30'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' SST ... Note',
' ... ',
'--------------- ... ----',
'041314.1+281910 ... --',
' ... ... ...',
'044427.1+251216 ... --',
'044642.6+245903 ... --',
'Length = 215 rows',
]
def test_show_dtype(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'),
'--show-dtype'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' target V_mag ',
'bytes20 float32',
'------- -------',
'NGC1001 11.1',
'NGC1002 12.3',
'NGC1003 15.2',
]
def test_hide_unit(capsys):
showtable.main([os.path.join(ASCII_ROOT, 'data/cds.dat'),
'--format', 'ascii.cds'])
out, err = capsys.readouterr()
assert out.splitlines() == [
'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ',
' h min s deg arcmin arcsec mag GMsun',
'----- --- --- ----- --- --- ------ ------ ----- ----- --- -----',
' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35',
]
showtable.main([os.path.join(ASCII_ROOT, 'data/cds.dat'),
'--format', 'ascii.cds', '--hide-unit'])
out, err = capsys.readouterr()
assert out.splitlines() == [
'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ',
'----- --- --- ----- --- --- --- --- ----- ----- --- ----',
' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35',
]
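# Command-line sketch (assuming the ``showtable`` console script that the
# astropy package installs; the tests above exercise the same code path
# programmatically via ``showtable.main([...])``):
#
#     $ showtable table.fits --info
#     $ showtable cds.dat --format ascii.cds --hide-unit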
|
0a5c82cccb99490f701d27b0d9790a5ca4e56aeb83072de0b7689189374a5eea | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.tests.helper import catch_warnings
from astropy.table import Table, Column, QTable, table_helpers, NdarrayMixin, unique
from astropy.utils.exceptions import AstropyUserWarning
from astropy import time
from astropy import units as u
from astropy import coordinates
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def test_column_group_by(T1):
for masked in (False, True):
t1 = Table(T1, masked=masked)
t1a = t1['a'].copy()
# Group by a Column (i.e. numpy array)
t1ag = t1a.group_by(t1['a'])
assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))
# Group by a Table
t1ag = t1a.group_by(t1['a', 'b'])
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Group by a numpy structured array
t1ag = t1a.group_by(t1['a', 'b'].as_array())
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
def test_table_group_by(T1):
"""
Test basic table group_by functionality for possible key types and for
masked/unmasked tables.
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Group by a single column key specified by name
tg = t1.group_by('a')
assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))
assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>"
assert str(tg['a'].groups) == "<ColumnGroups indices=[0 1 4 8]>"
# Sorted by 'a' and in original order for rest
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3']
assert tg.meta['ta'] == 1
assert tg['c'].meta['a'] == 1
assert tg['c'].description == 'column c'
# Group by a table column
tg2 = t1.group_by(t1['a'])
assert tg.pformat() == tg2.pformat()
# Group by two columns spec'd by name
for keys in (['a', 'b'], ('a', 'b')):
tg = t1.group_by(keys)
assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Sorted by 'a', 'b' and in original order for rest
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 1 b 3.0 5',
' 2 a 4.0 3',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 c 7.0 0']
# Group by a Table
tg2 = t1.group_by(t1['a', 'b'])
assert tg.pformat() == tg2.pformat()
# Group by a structured array
tg2 = t1.group_by(t1['a', 'b'].as_array())
assert tg.pformat() == tg2.pformat()
# Group by a simple ndarray
tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))
assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 2 c 7.0 0',
' 2 b 6.0 2',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 2 b 5.0 1',
' 2 a 4.0 3',
' 1 b 3.0 5',
' 0 a 0.0 4']
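def _example_group_by_indices():
    # Illustrative sketch, not a test: after group_by the table is sorted by
    # the key and ``groups.indices`` marks the group boundaries.
    t = Table({'a': [2, 1, 1], 'b': [10, 20, 30]})
    tg = t.group_by('a')
    assert list(tg.groups.indices) == [0, 2, 3]
    assert [len(g) for g in tg.groups] == [2, 1]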
def test_groups_keys(T1):
tg = T1.group_by('a')
keys = tg.groups.keys
assert keys.dtype.names == ('a',)
assert np.all(keys['a'] == np.array([0, 1, 2]))
tg = T1.group_by(['a', 'b'])
keys = tg.groups.keys
assert keys.dtype.names == ('a', 'b')
assert np.all(keys['a'] == np.array([0, 1, 1, 2, 2, 2]))
assert np.all(keys['b'] == np.array(['a', 'a', 'b', 'a', 'b', 'c']))
# Grouping by Column ignores column name
tg = T1.group_by(T1['b'])
keys = tg.groups.keys
assert keys.dtype.names is None
def test_groups_iterator(T1):
tg = T1.group_by('a')
for ii, group in enumerate(tg.groups):
assert group.pformat() == tg.groups[ii].pformat()
assert group['a'][0] == tg['a'][tg.groups.indices[ii]]
def test_grouped_copy(T1):
"""
Test that copying a table or column copies the groups properly
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
tg = t1.group_by('a')
tgc = tg.copy()
assert np.all(tgc.groups.indices == tg.groups.indices)
assert np.all(tgc.groups.keys == tg.groups.keys)
tac = tg['a'].copy()
assert np.all(tac.groups.indices == tg['a'].groups.indices)
c1 = t1['a'].copy()
gc1 = c1.group_by(t1['a'])
gc1c = gc1.copy()
assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))
def test_grouped_slicing(T1):
"""
Test that slicing a table removes previous grouping
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by('a')
tg2 = tg[3:5]
assert np.all(tg2.groups.indices == np.array([0, len(tg2)]))
assert tg2.groups.keys is None
def test_group_column_from_table(T1):
"""
Group a column that is part of a table
"""
cg = T1['c'].group_by(np.array(T1['a']))
assert np.all(cg.groups.keys == np.array([0, 1, 2]))
assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))
def test_table_groups_mask_index(T1):
"""
Use boolean mask as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by('a')
t2 = t1.groups[np.array([True, False, True])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
def test_table_groups_array_index(T1):
"""
Use numpy array as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by('a')
t2 = t1.groups[np.array([0, 2])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
def test_table_groups_slicing(T1):
"""
Test that slicing table groups works
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by('a')
# slice(0, 2)
t2 = t1.groups[0:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys['a'] == np.array([0, 1]))
# slice(1, 2)
t2 = t1.groups[1:2]
assert len(t2.groups) == 1
assert t2.groups[0].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys['a'] == np.array([1]))
# slice(0, 3, 2)
t2 = t1.groups[0:3:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
def test_grouped_item_access(T1):
"""
Test that column slicing preserves grouping
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by('a')
tgs = tg['a', 'c', 'd']
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 6.0 18',
' 2 22.0 6']
tgs = tg['c', 'd']
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [' c d ',
'---- ---',
' 0.0 4',
' 6.0 18',
'22.0 6']
def test_mutable_operations(T1):
"""
    Operations like adding or deleting a row should remove grouping,
    but adding, removing, or renaming a column should retain grouping.
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# add row
tg = t1.group_by('a')
tg.add_row((0, 'a', 3.0, 4))
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# remove row
tg = t1.group_by('a')
tg.remove_row(4)
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# add column
tg = t1.group_by('a')
indices = tg.groups.indices.copy()
tg.add_column(Column(name='e', data=np.arange(len(tg))))
assert np.all(tg.groups.indices == indices)
assert np.all(tg['e'].groups.indices == indices)
assert np.all(tg['e'].groups.keys == tg.groups.keys)
# remove column (not key column)
tg = t1.group_by('a')
tg.remove_column('b')
assert np.all(tg.groups.indices == indices)
# Still has original key col names
assert tg.groups.keys.dtype.names == ('a',)
assert np.all(tg['a'].groups.indices == indices)
# remove key column
tg = t1.group_by('a')
tg.remove_column('a')
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ('a',)
assert np.all(tg['b'].groups.indices == indices)
# rename key column
tg = t1.group_by('a')
tg.rename_column('a', 'aa')
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ('a',)
assert np.all(tg['aa'].groups.indices == indices)
def test_group_by_masked(T1):
t1m = Table(T1, masked=True)
t1m['c'].mask[4] = True
t1m['d'].mask[5] = True
assert t1m.group_by('a').pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a -- 4',
' 1 b 3.0 --',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3']
def test_group_by_errors(T1):
"""
Appropriate errors get raised.
"""
# Bad column name as string
with pytest.raises(ValueError):
T1.group_by('f')
# Bad column names in list
with pytest.raises(ValueError):
T1.group_by(['f', 'g'])
# Wrong length array
with pytest.raises(ValueError):
T1.group_by(np.array([1, 2]))
# Wrong type
with pytest.raises(TypeError):
T1.group_by(None)
# Masked key column
t1 = Table(T1, masked=True)
t1['a'].mask[4] = True
with pytest.raises(ValueError):
t1.group_by('a')
def test_groups_keys_meta(T1):
"""
Make sure the keys meta['grouped_by_table_cols'] is working.
"""
# Group by column in this table
tg = T1.group_by('a')
assert tg.groups.keys.meta['grouped_by_table_cols'] is True
assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is True
assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is True
assert (tg['d'].groups[np.array([False, True, True])]
.groups.keys.meta['grouped_by_table_cols'] is True)
# Group by external Table
tg = T1.group_by(T1['a', 'b'])
assert tg.groups.keys.meta['grouped_by_table_cols'] is False
assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is False
assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is False
# Group by external numpy array
tg = T1.group_by(T1['a', 'b'].as_array())
assert not hasattr(tg.groups.keys, 'meta')
assert not hasattr(tg['c'].groups.keys, 'meta')
# Group by Column
tg = T1.group_by(T1['a'])
assert 'grouped_by_table_cols' not in tg.groups.keys.meta
assert 'grouped_by_table_cols' not in tg['c'].groups.keys.meta
def test_table_aggregate(T1):
"""
Aggregate a table
"""
# Table with only summable cols
t1 = T1['a', 'c', 'd']
tg = t1.group_by('a')
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 6.0 18',
' 2 22.0 6']
# Reverts to default groups
assert np.all(tga.groups.indices == np.array([0, 3]))
assert tga.groups.keys is None
# metadata survives
assert tga.meta['ta'] == 1
assert tga['c'].meta['a'] == 1
assert tga['c'].description == 'column c'
# Aggregate with np.sum with masked elements. This results
# in one group with no elements, hence a nan result and conversion
# to float for the 'd' column.
t1m = Table(t1, masked=True)
t1m['c'].mask[4:6] = True
t1m['d'].mask[4:6] = True
tg = t1m.group_by('a')
with catch_warnings(Warning) as warning_lines:
tga = tg.groups.aggregate(np.sum)
assert warning_lines[0].category == UserWarning
assert "converting a masked element to nan" in str(warning_lines[0].message)
assert tga.pformat() == [' a c d ',
'--- ---- ----',
' 0 nan nan',
' 1 3.0 13.0',
' 2 22.0 6.0']
    # Aggregate with np.sum with masked elements, but where every
# group has at least one remaining (unmasked) element. Then
# the int column stays as an int.
t1m = Table(t1, masked=True)
t1m['c'].mask[5] = True
t1m['d'].mask[5] = True
tg = t1m.group_by('a')
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 3.0 13',
' 2 22.0 6']
    # Aggregate with a column type that cannot be supplied to the aggregating
# function. This raises a warning but still works.
tg = T1.group_by('a')
with catch_warnings(Warning) as warning_lines:
tga = tg.groups.aggregate(np.sum)
assert warning_lines[0].category == AstropyUserWarning
assert "Cannot aggregate column" in str(warning_lines[0].message)
assert tga.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 6.0 18',
' 2 22.0 6']
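def _example_aggregate():
    # Illustrative sketch, not a test: aggregate collapses each group to one
    # row, applying the function column-wise within each group (key columns
    # keep their key values).
    t = Table({'a': [1, 1, 2], 'c': [1.0, 2.0, 5.0]})
    tga = t.group_by('a').groups.aggregate(np.sum)
    assert list(tga['c']) == [3.0, 5.0]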
def test_table_aggregate_reduceat(T1):
"""
Aggregate table with functions which have a reduceat method
"""
# Comparison functions without reduceat
def np_mean(x):
return np.mean(x)
def np_sum(x):
return np.sum(x)
def np_add(x):
return np.add(x)
# Table with only summable cols
t1 = T1['a', 'c', 'd']
tg = t1.group_by('a')
# Comparison
tga_r = tg.groups.aggregate(np.sum)
tga_a = tg.groups.aggregate(np.add)
tga_n = tg.groups.aggregate(np_sum)
assert np.all(tga_r == tga_n)
assert np.all(tga_a == tga_n)
assert tga_n.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 6.0 18',
' 2 22.0 6']
tga_r = tg.groups.aggregate(np.mean)
tga_n = tg.groups.aggregate(np_mean)
assert np.all(tga_r == tga_n)
assert tga_n.pformat() == [' a c d ',
'--- --- ---',
' 0 0.0 4.0',
' 1 2.0 6.0',
' 2 5.5 1.5']
# Binary ufunc np_add should raise warning without reduceat
t2 = T1['a', 'c']
tg = t2.group_by('a')
with catch_warnings(Warning) as warning_lines:
tga = tg.groups.aggregate(np_add)
assert warning_lines[0].category == AstropyUserWarning
assert "Cannot aggregate column" in str(warning_lines[0].message)
assert tga.pformat() == [' a ',
'---',
' 0',
' 1',
' 2']
def test_column_aggregate(T1):
"""
Aggregate a single table column
"""
for masked in (False, True):
tg = Table(T1, masked=masked).group_by('a')
tga = tg['c'].groups.aggregate(np.sum)
assert tga.pformat() == [' c ',
'----',
' 0.0',
' 6.0',
'22.0']
def test_table_filter():
"""
Table groups filtering
"""
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
# Negative value in 'a' column should not filter because it is a key col
t = Table.read([' a c d',
' -2 7.0 0',
' -2 5.0 1',
' 0 0.0 4',
' 1 3.0 5',
' 1 2.0 -6',
' 1 1.0 7',
' 3 3.0 5',
' 3 -2.0 6',
' 3 1.0 7',
], format='ascii')
tg = t.group_by('a')
t2 = tg.groups.filter(all_positive)
assert t2.groups[0].pformat() == [' a c d ',
'--- --- ---',
' -2 7.0 0',
' -2 5.0 1']
assert t2.groups[1].pformat() == [' a c d ',
'--- --- ---',
' 0 0.0 4']
def test_column_filter():
"""
Table groups filtering
"""
def all_positive(column):
if np.any(column < 0):
return False
return True
# Negative value in 'a' column should not filter because it is a key col
t = Table.read([' a c d',
' -2 7.0 0',
' -2 5.0 1',
' 0 0.0 4',
' 1 3.0 5',
' 1 2.0 -6',
' 1 1.0 7',
' 3 3.0 5',
' 3 -2.0 6',
' 3 1.0 7',
], format='ascii')
tg = t.group_by('a')
c2 = tg['c'].groups.filter(all_positive)
assert len(c2.groups) == 3
assert c2.groups[0].pformat() == [' c ', '---', '7.0', '5.0']
assert c2.groups[1].pformat() == [' c ', '---', '0.0']
assert c2.groups[2].pformat() == [' c ', '---', '3.0', '2.0', '1.0']
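def _example_groups_filter():
    # Illustrative sketch, not a test: ``groups.filter`` keeps whole groups
    # for which the supplied predicate returns True.
    t = Table({'a': [1, 1, 2], 'c': [1.0, 2.0, 5.0]})
    t2 = t.group_by('a').groups.filter(lambda tbl, key_colnames: len(tbl) > 1)
    assert list(t2['a']) == [1, 1]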
def test_group_mixins():
"""
Test grouping a table with mixin columns
"""
# Setup mixins
idx = np.arange(4)
x = np.array([3., 1., 2., 1.])
q = x * u.m
lon = coordinates.Longitude(x * u.deg)
lat = coordinates.Latitude(x * u.deg)
# For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision)
tm = time.Time(2000, format='jyear') + time.TimeDelta(x * 1e-10, format='sec')
sc = coordinates.SkyCoord(ra=lon, dec=lat)
aw = table_helpers.ArrayWrapper(x)
nd = np.array([(3, 'c'), (1, 'a'), (2, 'b'), (1, 'a')],
dtype='<i4,|S1').view(NdarrayMixin)
qt = QTable([idx, x, q, lon, lat, tm, sc, aw, nd],
names=['idx', 'x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd'])
# Test group_by with each supported mixin type
mixin_keys = ['x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd']
for key in mixin_keys:
qtg = qt.group_by(key)
# Test that it got the sort order correct
assert np.all(qtg['idx'] == [1, 3, 2, 0])
# Test that the groups are right
# Note: skip testing SkyCoord column because that doesn't have equality
for name in ['x', 'q', 'lon', 'lat', 'tm', 'aw', 'nd']:
assert np.all(qt[name][[1, 3]] == qtg.groups[0][name])
assert np.all(qt[name][[2]] == qtg.groups[1][name])
assert np.all(qt[name][[0]] == qtg.groups[2][name])
# Test that unique also works with mixins since most of the work is
# done with group_by(). This is using *every* mixin as key.
uqt = unique(qt, keys=mixin_keys)
assert len(uqt) == 3
assert np.all(uqt['idx'] == [1, 2, 0])
assert np.all(uqt['x'] == [1., 2., 3.])
# Column group_by() with mixins
idxg = qt['idx'].group_by(qt[mixin_keys])
assert np.all(idxg == [1, 3, 2, 0])
|
d4d63f142f532825360bad552e6875fd96139178e22a882f77198f990c30e37d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test behavior related to masked tables"""
import pytest
import numpy as np
import numpy.ma as ma
from astropy.table import Column, MaskedColumn, Table
class SetupData:
def setup_method(self, method):
self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=1)
self.b = MaskedColumn(name='b', data=[4, 5, 6], mask=True)
self.c = MaskedColumn(name='c', data=[7, 8, 9], mask=False)
self.d_mask = np.array([False, True, False])
self.d = MaskedColumn(name='d', data=[7, 8, 7], mask=self.d_mask)
self.t = Table([self.a, self.b], masked=True)
self.ca = Column(name='ca', data=[1, 2, 3])
class TestPprint(SetupData):
def test_pformat(self):
assert self.t.pformat() == [' a b ', '--- ---', ' 1 --', ' 2 --', ' 3 --']
class TestFilled:
"""Test the filled method in MaskedColumn and Table"""
def setup_method(self, method):
mask = [True, False, False]
self.meta = {'a': 1, 'b': [2, 3]}
a = self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=10, mask=mask, meta={'a': 1})
b = self.b = MaskedColumn(name='b', data=[4.0, 5.0, 6.0], fill_value=10.0, mask=mask)
c = self.c = MaskedColumn(name='c', data=['7', '8', '9'], fill_value='1', mask=mask)
def test_filled_column(self):
f = self.a.filled()
assert np.all(f == [10, 2, 3])
assert isinstance(f, Column)
assert not isinstance(f, MaskedColumn)
# Confirm copy, not ref
assert f.meta['a'] == 1
f.meta['a'] = 2
f[1] = 100
assert self.a[1] == 2
assert self.a.meta['a'] == 1
# Fill with arg fill_value not column fill_value
f = self.a.filled(20)
assert np.all(f == [20, 2, 3])
f = self.b.filled()
assert np.all(f == [10.0, 5.0, 6.0])
assert isinstance(f, Column)
f = self.c.filled()
assert np.all(f == ['1', '8', '9'])
assert isinstance(f, Column)
def test_filled_masked_table(self, tableclass):
t = tableclass([self.a, self.b, self.c], meta=self.meta)
f = t.filled()
assert isinstance(f, Table)
assert f.masked is False
assert np.all(f['a'] == [10, 2, 3])
assert np.allclose(f['b'], [10.0, 5.0, 6.0])
assert np.all(f['c'] == ['1', '8', '9'])
# Confirm copy, not ref
assert f.meta['b'] == [2, 3]
f.meta['b'][0] = 20
assert t.meta['b'] == [2, 3]
f['a'][2] = 100
assert t['a'][2] == 3
def test_filled_unmasked_table(self, tableclass):
t = tableclass([(1, 2), ('3', '4')], names=('a', 'b'), meta=self.meta)
f = t.filled()
assert isinstance(f, Table)
assert f.masked is False
assert np.all(f['a'] == t['a'])
assert np.all(f['b'] == t['b'])
# Confirm copy, not ref
assert f.meta['b'] == [2, 3]
f.meta['b'][0] = 20
assert t.meta['b'] == [2, 3]
f['a'][1] = 100
assert t['a'][1] == 2
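def _example_filled():
    # Illustrative sketch, not a test: ``filled()`` substitutes the column
    # fill_value (or an explicit override) for masked entries.
    c = MaskedColumn([1, 2, 3], mask=[True, False, False], fill_value=-9)
    assert list(c.filled()) == [-9, 2, 3]
    assert list(c.filled(0)) == [0, 2, 3]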
class TestFillValue(SetupData):
"""Test setting and getting fill value in MaskedColumn and Table"""
def test_init_set_fill_value(self):
"""Check that setting fill_value in the MaskedColumn init works"""
assert self.a.fill_value == 1
c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], fill_value='none')
assert c.fill_value == 'none'
def test_set_get_fill_value_for_bare_column(self):
"""Check set and get of fill value works for bare Column"""
self.d.fill_value = -999
assert self.d.fill_value == -999
assert np.all(self.d.filled() == [7, -999, 7])
def test_set_get_fill_value_for_str_column(self):
c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], mask=[True, False])
# assert np.all(c.filled() == ['N/A', 'yyyy'])
c.fill_value = 'ABCDEF'
assert c.fill_value == 'ABCD' # string truncated to dtype length
assert np.all(c.filled() == ['ABCD', 'yyyy'])
assert np.all(c.filled('XY') == ['XY', 'yyyy'])
def test_table_column_mask_not_ref(self):
"""Table column mask is not ref of original column mask"""
self.b.fill_value = -999
assert self.t['b'].fill_value != -999
def test_set_get_fill_value_for_table_column(self):
"""Check set and get of fill value works for Column in a Table"""
self.t['b'].fill_value = 1
assert self.t['b'].fill_value == 1
assert np.all(self.t['b'].filled() == [1, 1, 1])
def test_data_attribute_fill_and_mask(self):
"""Check that .data attribute preserves fill_value and mask"""
self.t['b'].fill_value = 1
self.t['b'].mask = [True, False, True]
assert self.t['b'].data.fill_value == 1
assert np.all(self.t['b'].data.mask == [True, False, True])
class TestMaskedColumnInit(SetupData):
"""Initialization of a masked column"""
def test_set_mask_and_not_ref(self):
"""Check that mask gets set properly and that it is a copy, not ref"""
assert np.all(~self.a.mask)
assert np.all(self.b.mask)
assert np.all(~self.c.mask)
assert np.all(self.d.mask == self.d_mask)
self.d.mask[0] = True
assert not np.all(self.d.mask == self.d_mask)
def test_set_mask_from_list(self):
"""Set mask from a list"""
mask_list = [False, True, False]
a = MaskedColumn(name='a', data=[1, 2, 3], mask=mask_list)
assert np.all(a.mask == mask_list)
def test_override_existing_mask(self):
"""Override existing mask values"""
mask_list = [False, True, False]
b = MaskedColumn(name='b', data=self.b, mask=mask_list)
assert np.all(b.mask == mask_list)
def test_incomplete_mask_spec(self):
"""Incomplete mask specification raises MaskError"""
mask_list = [False, True]
with pytest.raises(ma.MaskError):
MaskedColumn(name='b', length=4, mask=mask_list)
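def _example_mask_is_copied():
    # Illustrative sketch, not a test: the mask handed to MaskedColumn is
    # copied, so later edits to the column mask do not touch the original.
    m = np.array([False, True])
    c = MaskedColumn([1, 2], mask=m)
    c.mask[0] = True
    assert not m[0]  # caller's mask array is unchanged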
class TestTableInit(SetupData):
"""Initializing a table"""
def test_mask_true_if_any_input_masked(self):
"""Masking is True if any input is masked"""
t = Table([self.ca, self.a])
assert t.masked is True
t = Table([self.ca])
assert t.masked is False
t = Table([self.ca, ma.array([1, 2, 3])])
assert t.masked is True
def test_mask_false_if_no_input_masked(self):
"""Masking not true if not (requested or input requires mask)"""
t0 = Table([[3, 4]], masked=False)
t1 = Table(t0, masked=True)
t2 = Table(t1, masked=False)
assert not t0.masked
assert t1.masked
assert not t2.masked
def test_mask_property(self):
t = self.t
# Access table mask (boolean structured array) by column name
assert np.all(t.mask['a'] == np.array([False, False, False]))
assert np.all(t.mask['b'] == np.array([True, True, True]))
# Check that setting mask from table mask has the desired effect on column
t.mask['b'] = np.array([False, True, False])
assert np.all(t['b'].mask == np.array([False, True, False]))
# Non-masked table returns None for mask attribute
t2 = Table([self.ca], masked=False)
assert t2.mask is None
# Set mask property globally and verify local correctness
for mask in (True, False):
t.mask = mask
for name in ('a', 'b'):
assert np.all(t[name].mask == mask)
class TestAddColumn:
def test_add_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
assert t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_column_to_non_masked_table(self):
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_non_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_convert_to_masked_table_only_if_necessary(self):
# Do not convert to masked table, if new column has no masked value.
# See #1185 for details.
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[0, 0, 0]))
assert not t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([4, 5, 6]))
class TestRenameColumn:
def test_rename_masked_column(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
t['a'].fill_value = 42
t.rename_column('a', 'b')
assert t.masked
assert np.all(t['b'] == np.array([1, 2, 3]))
assert np.all(t['b'].mask == np.array([0, 1, 0], bool))
assert t['b'].fill_value == 42
assert t.colnames == ['b']
class TestRemoveColumn:
def test_remove_masked_column(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
t['a'].fill_value = 42
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
t.remove_column('b')
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['a'].fill_value == 42
assert t.colnames == ['a']
class TestAddRow:
def test_add_masked_row_to_masked_table_iterable(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row([2, 5], mask=[1, 0])
t.add_row([3, 6], mask=[0, 1])
assert t.masked
assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping1(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5, 'a': 2}, mask={'a': 1, 'b': 0})
t.add_row({'a': 3, 'b': 6}, mask={'b': 1, 'a': 0})
assert t.masked
assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping2(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then values not specified will have mask values set to True
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5}, mask={'b': 0})
t.add_row({'a': 3}, mask={'a': 0})
assert t.masked
assert t['a'][0] == 1 and t['a'][2] == 3
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['b'][1] == 5
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping3(self):
# When adding values to a masked table, if mask is not passed to
# add_row, then the mask should be set to False if values are present
# and True if not.
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5})
t.add_row({'a': 3})
assert t.masked
assert t['a'][0] == 1 and t['a'][2] == 3
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['b'][1] == 5
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping4(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then keys in values should match keys in mask
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
with pytest.raises(ValueError) as exc:
t.add_row({'b': 5}, mask={'a': True})
assert exc.value.args[0] == 'keys in mask should match keys in vals'
def test_add_masked_row_to_masked_table_mismatch(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
with pytest.raises(TypeError) as exc:
t.add_row([2, 5], mask={'a': 1, 'b': 0})
assert exc.value.args[0] == "Mismatch between type of vals and mask"
with pytest.raises(TypeError) as exc:
t.add_row({'b': 5, 'a': 2}, mask=[1, 0])
assert exc.value.args[0] == "Mismatch between type of vals and mask"
def test_add_masked_row_to_non_masked_table_iterable(self):
t = Table(masked=False)
t.add_column(Column(name='a', data=[1]))
t.add_column(Column(name='b', data=[4]))
assert not t.masked
t.add_row([2, 5])
assert not t.masked
t.add_row([3, 6], mask=[0, 1])
assert t.masked
assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([0, 0, 1], bool))
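def _example_add_row_mask_defaults():
    # Illustrative sketch, not a test: when add_row gets no mask on a masked
    # table, supplied values arrive unmasked and omitted columns arrive
    # masked (mirroring the mapping3 test above).
    t = Table(masked=True)
    t.add_column(MaskedColumn(name='a', data=[1]))
    t.add_column(MaskedColumn(name='b', data=[4]))
    t.add_row({'a': 2})
    assert not t['a'].mask[1] and t['b'].mask[1]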
def test_setting_from_masked_column():
"""Test issue in #2997"""
mask_b = np.array([True, True, False, False])
for select in (mask_b, slice(0, 2)):
t = Table(masked=True)
t['a'] = Column([1, 2, 3, 4])
t['b'] = MaskedColumn([11, 22, 33, 44], mask=mask_b)
t['c'] = MaskedColumn([111, 222, 333, 444], mask=[True, False, True, False])
t['b'][select] = t['c'][select]
assert t['b'][1] == t[1]['b']
assert t['b'][0] is np.ma.masked # Original state since t['c'][0] is masked
assert t['b'][1] == 222 # New from t['c'] since t['c'][1] is unmasked
assert t['b'][2] == 33
assert t['b'][3] == 44
assert np.all(t['b'].mask == t.mask['b']) # Avoid t.mask in general, this is for testing
mask_before_add = t.mask.copy()
t['d'] = np.arange(len(t))
assert np.all(t.mask['b'] == mask_before_add['b'])
def test_coercing_fill_value_type():
"""
Test that masked column fill_value is coerced into the correct column type.
"""
# This is the original example posted on the astropy@scipy mailing list
t = Table({'a': ['1']}, masked=True)
t['a'].set_fill_value('0')
t2 = Table(t, names=['a'], dtype=[np.int32])
assert isinstance(t2['a'].fill_value, np.int32)
# Unit test the same thing.
c = MaskedColumn(['1'])
c.set_fill_value('0')
c2 = MaskedColumn(c, dtype=np.int32)
assert isinstance(c2.fill_value, np.int32)
def test_mask_copy():
"""Test that the mask is copied when copying a table (issue #7362)."""
c = MaskedColumn([1, 2], mask=[False, True])
c2 = MaskedColumn(c, copy=True)
c2.mask[0] = True
assert np.all(c.mask == [False, True])
assert np.all(c2.mask == [True, True])
|
2f74d4bd3864fa0e7a681461101a8c450ac371081026b659665b13460520aeab |
import numpy as np
import pickle
from astropy.table import Table, Column, MaskedColumn, QTable
from astropy.table.table_helpers import simple_table
from astropy.units import Quantity, deg
from astropy.time import Time
from astropy.coordinates import SkyCoord
def test_pickle_column(protocol):
c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
    cs = pickle.dumps(c, protocol=protocol)
cp = pickle.loads(cs)
assert np.all(cp == c)
assert cp.attrs_equal(c)
assert cp._parent_table is None
assert repr(c) == repr(cp)
def test_pickle_masked_column(protocol):
c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm',
meta={'a': 1})
c.mask[1] = True
c.fill_value = -99
    cs = pickle.dumps(c, protocol=protocol)
cp = pickle.loads(cs)
assert np.all(cp._data == c._data)
assert np.all(cp.mask == c.mask)
assert cp.attrs_equal(c)
assert cp.fill_value == -99
assert cp._parent_table is None
assert repr(c) == repr(cp)
def test_pickle_multidimensional_column(protocol):
"""Regression test for https://github.com/astropy/astropy/issues/4098"""
a = np.zeros((3, 2))
c = Column(a, name='a')
    cs = pickle.dumps(c, protocol=protocol)
cp = pickle.loads(cs)
assert np.all(c == cp)
assert c.shape == cp.shape
assert cp.attrs_equal(c)
assert repr(c) == repr(cp)
def test_pickle_table(protocol):
a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
meta={'b': 1})
for table_class in Table, QTable:
t = table_class([a, b], meta={'a': 1, 'b': Quantity(10, unit='s')})
t['c'] = Quantity([1, 2], unit='m')
t['d'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
t['e'] = SkyCoord([125.0, 180.0]*deg, [-45.0, 36.5]*deg)
        ts = pickle.dumps(t, protocol=protocol)
tp = pickle.loads(ts)
assert tp.__class__ is table_class
assert np.all(tp['a'] == t['a'])
assert np.all(tp['b'] == t['b'])
# test mixin columns
assert np.all(tp['c'] == t['c'])
assert np.all(tp['d'] == t['d'])
assert np.all(tp['e'].ra == t['e'].ra)
assert np.all(tp['e'].dec == t['e'].dec)
assert type(tp['c']) is type(t['c']) # nopep8
assert type(tp['d']) is type(t['d']) # nopep8
assert type(tp['e']) is type(t['e']) # nopep8
assert tp.meta == t.meta
assert type(tp) is type(t)
assert isinstance(tp['c'], Quantity if (table_class is QTable) else Column)
def test_pickle_masked_table(protocol):
a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
meta={'b': 1})
t = Table([a, b], meta={'a': 1}, masked=True)
t['a'].mask[1] = True
t['a'].fill_value = -99
    ts = pickle.dumps(t, protocol=protocol)
tp = pickle.loads(ts)
for colname in ('a', 'b'):
for attr in ('_data', 'mask', 'fill_value'):
            assert np.all(getattr(t[colname], attr) == getattr(tp[colname], attr))
assert tp['a'].attrs_equal(t['a'])
assert tp['b'].attrs_equal(t['b'])
assert tp.meta == t.meta
def test_pickle_indexed_table(protocol):
"""
Ensure that any indices that have been added will survive pickling.
"""
t = simple_table()
t.add_index('a')
t.add_index(['a', 'b'])
    ts = pickle.dumps(t, protocol=protocol)
tp = pickle.loads(ts)
assert len(t.indices) == len(tp.indices)
for index, indexp in zip(t.indices, tp.indices):
assert np.all(index.data.data == indexp.data.data)
assert index.data.data.colnames == indexp.data.data.colnames
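def _example_pickle_roundtrip():
    # Illustrative sketch, not a test: a Table round-trips through pickle at
    # the highest available protocol with its columns intact.
    t = simple_table()
    tp = pickle.loads(pickle.dumps(t, protocol=pickle.HIGHEST_PROTOCOL))
    assert tp.colnames == t.colnames
    assert all(np.all(tp[name] == t[name]) for name in t.colnames)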
|
6a7634a703a3b973d1c50c88ea888358057407437994d80367342b30eaa44f71 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
All of the py.test fixtures used by astropy.table are defined here.
The fixtures can not be defined in the modules that use them, because
those modules are imported twice: once with `from __future__ import
unicode_literals` and once without. py.test complains when the same
fixtures are defined more than once.
`conftest.py` is a "special" module name for py.test that is always
imported, but is not looked in for tests, and it is the recommended
place to put fixtures that are shared between modules. These fixtures
can not be defined in a module by a different name and still be shared
between modules.
"""
from copy import deepcopy
from collections import OrderedDict
import pickle
import pytest
import numpy as np
from astropy import table
from astropy.table import table_helpers, Table, QTable
from astropy import time
from astropy import units as u
from astropy import coordinates
from astropy.table import pprint
@pytest.fixture(params=[table.Column, table.MaskedColumn])
def Column(request):
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
return request.param
class MaskedTable(table.Table):
def __init__(self, *args, **kwargs):
kwargs['masked'] = True
table.Table.__init__(self, *args, **kwargs)
class MyRow(table.Row):
pass
class MyColumn(table.Column):
pass
class MyMaskedColumn(table.MaskedColumn):
pass
class MyTableColumns(table.TableColumns):
pass
class MyTableFormatter(pprint.TableFormatter):
pass
class MyTable(table.Table):
Row = MyRow
Column = MyColumn
MaskedColumn = MyMaskedColumn
TableColumns = MyTableColumns
TableFormatter = MyTableFormatter
# Fixture to run all the Table tests for unmasked, masked, and subclassed
# table types.
@pytest.fixture(params=['unmasked', 'masked', 'subclass'])
def table_types(request):
class TableTypes:
def __init__(self, request):
if request.param == 'unmasked':
self.Table = table.Table
self.Column = table.Column
elif request.param == 'masked':
self.Table = MaskedTable
self.Column = table.MaskedColumn
elif request.param == 'subclass':
self.Table = MyTable
self.Column = MyColumn
return TableTypes(request)
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_data(request):
class TableData:
def __init__(self, request):
self.Table = MaskedTable if request.param else table.Table
self.Column = table.MaskedColumn if request.param else table.Column
self.COLS = [
self.Column(name='a', data=[1, 2, 3], description='da',
format='%i', meta={'ma': 1}, unit='ua'),
self.Column(name='b', data=[4, 5, 6], description='db',
format='%d', meta={'mb': 1}, unit='ub'),
self.Column(name='c', data=[7, 8, 9], description='dc',
format='%f', meta={'mc': 1}, unit='ub')]
self.DATA = self.Table(self.COLS)
return TableData(request)
class SubclassTable(table.Table):
pass
@pytest.fixture(params=[True, False])
def tableclass(request):
return table.Table if request.param else SubclassTable
@pytest.fixture(params=list(range(0, pickle.HIGHEST_PROTOCOL + 1)))
def protocol(request):
"""
Fixture to run all the tests for all available pickle protocols.
"""
return request.param
# Fixture to run all tests for both an unmasked (ndarray) and masked
# (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_type(request):
    # Return MaskedTable when parametrized with True, plain Table otherwise;
    # fall back to Table if the fixture is used without parametrization.
    try:
        return MaskedTable if request.param else table.Table
    except AttributeError:
        return table.Table
# Stuff for testing mixin columns
MIXIN_COLS = {'quantity': [0, 1, 2, 3] * u.m,
'longitude': coordinates.Longitude([0., 1., 5., 6.]*u.deg,
wrap_angle=180.*u.deg),
'latitude': coordinates.Latitude([5., 6., 10., 11.]*u.deg),
'time': time.Time([2000, 2001, 2002, 2003], format='jyear'),
'skycoord': coordinates.SkyCoord(ra=[0, 1, 2, 3] * u.deg,
dec=[0, 1, 2, 3] * u.deg),
'arraywrap': table_helpers.ArrayWrapper([0, 1, 2, 3]),
'ndarray': np.array([(7, 'a'), (8, 'b'), (9, 'c'), (9, 'c')],
dtype='<i4,|S1').view(table.NdarrayMixin),
}
MIXIN_COLS['earthlocation'] = coordinates.EarthLocation(
lon=MIXIN_COLS['longitude'], lat=MIXIN_COLS['latitude'],
height=MIXIN_COLS['quantity'])
@pytest.fixture(params=sorted(MIXIN_COLS))
def mixin_cols(request):
"""
Fixture to return a set of columns for mixin testing which includes
an index column 'i', two string cols 'a', 'b' (for joins etc), and
one of the available mixin column types.
"""
cols = OrderedDict()
mixin_cols = deepcopy(MIXIN_COLS)
cols['i'] = table.Column([0, 1, 2, 3], name='i')
cols['a'] = table.Column(['a', 'b', 'b', 'c'], name='a')
cols['b'] = table.Column(['b', 'c', 'a', 'd'], name='b')
cols['m'] = mixin_cols[request.param]
return cols
@pytest.fixture(params=[False, True])
def T1(request):
T = Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
T.meta.update({'ta': 1})
T['c'].meta.update({'a': 1})
T['c'].description = 'column c'
if request.param:
T.add_index('a')
return T
@pytest.fixture(params=[Table, QTable])
def operation_table_type(request):
return request.param
|
71fb5b503c0843e380c44eda724bbe91f69f245046413a8903dac9ccdcfc8c21 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
import pytest
import numpy as np
from astropy.tests.helper import catch_warnings
from astropy.table import Table, QTable, TableMergeError
from astropy.table.operations import _get_out_class
from astropy import units as u
from astropy.utils import metadata
from astropy.utils.metadata import MergeConflictError
from astropy import table
from astropy.time import Time
from astropy.coordinates import SkyCoord
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def skycoord_equal(sc1, sc2):
if not sc1.is_equivalent_frame(sc2):
return False
if sc1.representation_type is not sc2.representation_type:
return False
if sc1.shape != sc2.shape:
return False # Maybe raise ValueError corresponding to future numpy behavior
eq = np.ones(shape=sc1.shape, dtype=bool)
for comp in sc1.data.components:
eq &= getattr(sc1.data, comp) == getattr(sc2.data, comp)
return np.all(eq)
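def _example_skycoord_equal():
    # Illustrative sketch, not a test, for the helper above: component-wise
    # equality of two views of the same coordinates.
    sc = SkyCoord([1., 2.] * u.deg, [3., 4.] * u.deg)
    assert skycoord_equal(sc, sc[:])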
class TestJoin():
def _setup(self, t_cls=Table):
lines1 = [' a b c ',
' 0 foo L1',
' 1 foo L2',
' 1 bar L3',
' 2 bar L4']
lines2 = [' a b d ',
' 1 foo R1',
' 1 foo R2',
' 2 bar R3',
' 4 bar R4']
self.t1 = t_cls.read(lines1, format='ascii')
self.t2 = t_cls.read(lines2, format='ascii')
self.t3 = t_cls(self.t2, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]),
('c', {'a': 1, 'b': 1}),
('d', 1),
('a', 1)])
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.join(self.t1, self.t2, join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner')
assert len(w) == 3
assert out.meta == self.t3.meta
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn')
assert len(w) == 3
assert out.meta == self.t3.meta
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent')
assert len(w) == 0
assert out.meta == self.t3.meta
with pytest.raises(MergeConflictError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense')
def test_both_unmasked_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Basic join with default parameters (inner join on common keys)
t12 = table.join(t1, t2)
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b']) is type(t1['b'])
assert type(t12['c']) is type(t1['c'])
assert type(t12['d']) is type(t2['d'])
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Table meta merged properly
assert t12.meta == self.meta_merge
def test_both_unmasked_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Right join
t12 = table.join(t1, t2, join_type='right')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Check that the common keys are 'a', 'b'
t12a = table.join(t1, t2, join_type='outer')
t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b'])
assert np.all(t12a.as_array() == t12b.as_array())
def test_both_unmasked_single_key_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Inner join on 'a' column
t12 = table.join(t1, t2, keys='a')
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b_1']) is type(t1['b'])
assert type(t12['c']) is type(t1['c'])
assert type(t12['b_2']) is type(t2['b'])
assert type(t12['d']) is type(t2['d'])
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
def test_both_unmasked_single_key_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left', keys='a')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
# Right join
t12 = table.join(t1, t2, join_type='right', keys='a')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer', keys='a')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
def test_masked_unmasked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
# Result should be masked even though not req'd by inner join
t1m2 = table.join(t1m, t2, join_type='inner')
assert t1m2.masked is True
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2))
# Mask out some values in left table and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t1m2 = table.join(t1m, t2, join_type='inner', keys='a')
assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar R3'])
t21m = table.join(t2, t1m, join_type='inner', keys='a')
assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ',
'--- --- --- --- ---',
' 1 foo R2 -- L2',
' 1 foo R2 bar --',
' 1 foo R1 -- L2',
' 1 foo R1 bar --',
' 2 bar R3 bar L4'])
def test_masked_masked(self, operation_table_type):
        """Two masked tables"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
t2m = operation_table_type(self.t2, masked=True)
# Result should be masked even though not req'd by inner join
t1m2m = table.join(t1m, t2m, join_type='inner')
assert t1m2m.masked is True
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2m))
# Mask out some values in both tables and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t2m['d'].mask[2] = True
t1m2m = table.join(t1m, t2m, join_type='inner', keys='a')
assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar --'])
def test_col_rename(self, operation_table_type):
        """
        Test auto col renaming when there is a conflict. Use
        non-default values of uniq_col_name and table_names.
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y',
table_names=['L', 'R'], keys='a')
assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd']
def test_rename_conflict(self, operation_table_type):
        """
        Test that auto-column rename fails because of a conflict
        with an existing column
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename
with pytest.raises(TableMergeError):
table.join(t1, t2, keys='a')
def test_missing_keys(self, operation_table_type):
        """Merge on a key column that doesn't exist"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(TableMergeError):
table.join(t1, t2, keys=['a', 'not there'])
def test_bad_join_type(self, operation_table_type):
        """Bad join_type input"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(ValueError):
table.join(t1, t2, join_type='illegal value')
def test_no_common_keys(self, operation_table_type):
        """Merge tables with no common keys"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
del t1['a']
del t1['b']
del t2['a']
del t2['b']
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_masked_key_column(self, operation_table_type):
        """Merge on a key column that has a masked element"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t2 = operation_table_type(self.t2, masked=True)
table.join(t1, t2) # OK
t2['a'].mask[0] = True
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t2.rename_column('d', 'c') # force col conflict and renaming
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
        # Key col 'a', should take last value ('m')
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
        # Key col 'b', take first non-empty value '%6s'
t2['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta = meta1
t2['a'].info.meta = meta2
# Key col 'b', should be meta2
t2['b'].info.meta = meta2
# All these should pass through
t1['c'].info.format = '%3s'
t1['c'].info.description = 't1_c'
t2['c'].info.format = '%6s'
t2['c'].info.description = 't2_c'
with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
t12 = table.join(t1, t2, keys=['a', 'b'])
if operation_table_type is Table:
assert warning_lines[0].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
else:
assert len(warning_lines) == 0
assert t12['a'].unit == 'm'
assert t12['b'].info.description == 't1_b'
assert t12['b'].info.format == '%6s'
assert t12['a'].info.meta == self.meta_merge
assert t12['b'].info.meta == meta2
assert t12['c_1'].info.format == '%3s'
assert t12['c_1'].info.description == 't1_c'
assert t12['c_2'].info.format == '%6s'
assert t12['c_2'].info.description == 't2_c'
def test_join_multidimensional(self, operation_table_type):
self._setup(operation_table_type)
# Regression test for #2984, which was an issue where join did not work
# on multi-dimensional columns.
t1 = operation_table_type()
t1['a'] = [1, 2, 3]
t1['b'] = np.ones((3, 4))
t2 = operation_table_type()
t2['a'] = [1, 2, 3]
t2['c'] = [4, 5, 6]
t3 = table.join(t1, t2)
np.testing.assert_allclose(t3['a'], t1['a'])
np.testing.assert_allclose(t3['b'], t1['b'])
np.testing.assert_allclose(t3['c'], t2['c'])
    def test_join_multidimensional_masked(self, operation_table_type):
        """
        Test for outer join with multidimensional columns where masking is required.
        (Issue #4059).
        """
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
a = table.MaskedColumn([1, 2, 3], name='a')
a2 = table.Column([1, 3, 4], name='a')
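        # 'a' columns share keys 1 and 3; the outer join adds rows for 2 and 4,
        # which forces masking of the non-key columns.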
b = table.MaskedColumn([[1, 2],
[3, 4],
[5, 6]],
name='b',
mask=[[1, 0],
[0, 1],
[0, 0]])
c = table.Column([[1, 1],
[2, 2],
[3, 3]],
name='c')
t1 = operation_table_type([a, b])
t2 = operation_table_type([a2, c])
t12 = table.join(t1, t2, join_type='inner')
assert np.all(t12['b'].mask == [[True, False],
[False, False]])
assert np.all(t12['c'].mask == [[False, False],
[False, False]])
t12 = table.join(t1, t2, join_type='outer')
assert np.all(t12['b'].mask == [[True, False],
[False, True],
[False, False],
[True, True]])
assert np.all(t12['c'].mask == [[False, False],
[True, True],
[False, False],
[False, False]])
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
cls_name = type(col).__name__
len_col = len(col)
idx = np.arange(len_col)
t1 = table.QTable([idx, col], names=['idx', 'm1'])
t2 = table.QTable([idx, col], names=['idx', 'm2'])
# Set up join mismatches for different join_type cases
t1 = t1[[0, 1, 3]]
t2 = t2[[0, 2, 3]]
# Test inner join, which works for all mixin_cols
out = table.join(t1, t2, join_type='inner')
assert len(out) == 2
assert out['m2'].__class__ is col.__class__
assert np.all(out['idx'] == [0, 3])
if cls_name == 'SkyCoord':
# SkyCoord doesn't support __eq__ so use our own
assert skycoord_equal(out['m1'], col[[0, 3]])
assert skycoord_equal(out['m2'], col[[0, 3]])
else:
assert np.all(out['m1'] == col[[0, 3]])
assert np.all(out['m2'] == col[[0, 3]])
# Check for left, right, outer join which requires masking. Only Time
# supports this currently.
if cls_name == 'Time':
out = table.join(t1, t2, join_type='left')
assert len(out) == 3
assert np.all(out['idx'] == [0, 1, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
assert np.all(out['m1'].mask == [False, False, False])
assert np.all(out['m2'].mask == [False, True, False])
out = table.join(t1, t2, join_type='right')
assert len(out) == 3
assert np.all(out['idx'] == [0, 2, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
assert np.all(out['m1'].mask == [False, True, False])
assert np.all(out['m2'].mask == [False, False, False])
out = table.join(t1, t2, join_type='outer')
assert len(out) == 4
assert np.all(out['idx'] == [0, 1, 2, 3])
assert np.all(out['m1'] == col)
assert np.all(out['m2'] == col)
assert np.all(out['m1'].mask == [False, False, True, False])
assert np.all(out['m2'].mask == [False, True, False, False])
else:
# Otherwise make sure it fails with the right exception message
for join_type in ('outer', 'left', 'right'):
with pytest.raises(NotImplementedError) as err:
                    table.join(t1, t2, join_type=join_type)
assert ('join requires masking' in str(err) or
'join unavailable' in str(err))
class TestSetdiff():
def _setup(self, t_cls=Table):
lines1 = [' a b ',
' 0 foo ',
' 1 foo ',
' 1 bar ',
' 2 bar ']
lines2 = [' a b ',
' 0 foo ',
' 3 foo ',
' 4 bar ',
' 2 bar ']
lines3 = [' a b d ',
' 0 foo R1',
' 8 foo R2',
' 1 bar R3',
' 4 bar R4']
self.t1 = t_cls.read(lines1, format='ascii')
self.t2 = t_cls.read(lines2, format='ascii')
self.t3 = t_cls.read(lines3, format='ascii')
def test_default_same_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t2)
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b ',
'--- ---',
' 1 bar',
' 1 foo']
def test_default_same_tables(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t1)
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b ',
'--- ---']
def test_extra_col_left_table(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
out = table.setdiff(self.t3, self.t1)
def test_extra_col_right_table(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t3)
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b ',
'--- ---',
' 1 foo',
' 2 bar']
def test_keys(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t3, self.t1, keys=['a', 'b'])
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b d ',
'--- --- ---',
' 4 bar R4',
' 8 foo R2']
def test_missing_key(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
out = table.setdiff(self.t3, self.t1, keys=['a', 'd'])
class TestVStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t3 = t_cls.read([' a b',
' 4. 7',
' 5. 8',
' 6. 9'], format='ascii')
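        # Note t3['b'] parses as numeric while t1['b'] is string; this dtype
        # mismatch is what test_stack_incompatible relies on.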
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
('c', {'a': 1, 'b': 1, 'c': 1}),
('d', 1),
('a', 1),
('e', 1)])
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2[1]])
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'1.0 bar']
def test_stack_table_column(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2['a']])
assert out.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'0.0 --',
'1.0 --']
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.vstack([self.t1, self.t2, self.t4], join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with catch_warnings() as w:
out = table.vstack([self.t1, self.t5], join_type='inner')
assert len(w) == 2
assert out.meta == self.t5.meta
with catch_warnings() as w:
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
assert len(w) == 2
assert out.meta == self.t5.meta
with catch_warnings() as w:
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
assert len(w) == 0
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.vstack([])
with pytest.raises(TypeError):
table.vstack(1)
with pytest.raises(TypeError):
table.vstack([self.t2, 1])
with pytest.raises(ValueError):
table.vstack([self.t1, self.t2], join_type='invalid join type')
def test_stack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type='inner')
assert t12.masked is False
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b']) is type(t1['b'])
assert t12.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'2.0 pez',
'3.0 sez']
t124 = table.vstack([t1, t2, t4], join_type='inner')
assert type(t124) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b']) is type(t1['b'])
assert t124.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'2.0 pez',
'3.0 sez',
'0.0 foo',
'1.0 bar']
def test_stack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type='outer')
assert t12.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo --',
'1.0 bar --',
'2.0 pez 4',
'3.0 sez 5']
t124 = table.vstack([t1, t2, t4], join_type='outer')
assert t124.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo --',
'1.0 bar --',
'2.0 pez 4',
'3.0 sez 5',
'0.0 foo --',
'1.0 bar --']
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type='inner')
assert ("The 'b' columns have incompatible types: {0}"
.format([self.t1['b'].dtype.name, self.t3['b'].dtype.name])
in str(excinfo))
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type='outer')
assert "The 'b' columns have incompatible types:" in str(excinfo)
with pytest.raises(TableMergeError):
table.vstack([self.t1, self.t2], join_type='exact')
t1_reshape = self.t1.copy()
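        # Reshaping 'b' to (2, 1) makes it incompatible with the 1-d 'b' in t1.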
t1_reshape['b'].shape = [2, 1]
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, t1_reshape])
assert "have different shape" in str(excinfo)
def test_vstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t4 = self.t4
t4['b'].mask[1] = True
assert table.vstack([t1, t4]).pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'0.0 foo',
'1.0 --']
def test_col_meta_merge_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1['a'].info.unit = 'cm'
t2['a'].info.unit = 'm'
t4['a'].info.unit = 'km'
# Key col 'a' format should take last when all match
t1['a'].info.format = '%f'
t2['a'].info.format = '%f'
t4['a'].info.format = '%f'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t4['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
# Key col 'b', should be meta2
t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
out = table.vstack([t1, t2, t4], join_type='inner')
if operation_table_type is Table:
assert warning_lines[0].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
assert warning_lines[1].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message))
# Check units are suitably ignored for a regular Table
assert out.pformat() == [' a b ',
' km ',
'-------- ------',
'0.000000 foo',
'1.000000 bar',
'2.000000 pez',
'3.000000 sez',
'0.000000 foo',
'1.000000 bar']
else:
assert len(warning_lines) == 0
# Check QTable correctly dealt with units.
assert out.pformat() == [' a b ',
' km ',
'-------- ------',
'0.000000 foo',
'0.000010 bar',
'0.002000 pez',
'0.003000 sez',
'0.000000 foo',
'1.000000 bar']
assert out['a'].info.unit == 'km'
assert out['a'].info.format == '%f'
assert out['b'].info.description == 't1_b'
assert out['b'].info.format == '%6s'
assert out['a'].info.meta == self.meta_merge
assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
def test_col_meta_merge_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
t4['a'].unit = 'km'
# Key col 'a' format should take last when all match
t1['a'].info.format = '%0d'
t2['a'].info.format = '%0d'
t4['a'].info.format = '%0d'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t4['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
# Key col 'b', should be meta2
t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
# All these should pass through
t2['c'].unit = 'm'
t2['c'].info.format = '%6s'
t2['c'].info.description = 't2_c'
with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
out = table.vstack([t1, t2, t4], join_type='outer')
assert warning_lines[0].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
assert warning_lines[1].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message))
assert out['a'].unit == 'km'
assert out['a'].info.format == '%0d'
assert out['b'].info.description == 't1_b'
assert out['b'].info.format == '%6s'
assert out['a'].info.meta == self.meta_merge
assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
assert out['c'].info.unit == 'm'
assert out['c'].info.format == '%6s'
assert out['c'].info.description == 't2_c'
    def test_vstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
assert (self.t1 == table.vstack(self.t1)).all()
assert (self.t1 == table.vstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
len_col = len(col)
t = table.QTable([col], names=['a'])
cls_name = type(col).__name__
# Vstack works for these classes:
implemented_mixin_classes = ['Quantity', 'Angle', 'Time',
'Latitude', 'Longitude',
'EarthLocation']
if cls_name in implemented_mixin_classes:
out = table.vstack([t, t])
assert len(out) == len_col * 2
assert np.all(out['a'][:len_col] == col)
assert np.all(out['a'][len_col:] == col)
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t])
assert ('vstack unavailable for mixin column type(s): {}'
.format(cls_name) in str(err))
# Check for outer stack which requires masking. Only Time supports
# this currently.
t2 = table.QTable([col], names=['b']) # different from col name for t
if cls_name == 'Time':
out = table.vstack([t, t2], join_type='outer')
assert len(out) == len_col * 2
assert np.all(out['a'][:len_col] == col)
assert np.all(out['b'][len_col:] == col)
assert np.all(out['a'].mask == [False] * len_col + [True] * len_col)
assert np.all(out['b'].mask == [True] * len_col + [False] * len_col)
# check directly stacking mixin columns:
out2 = table.vstack([t, t2['b']])
assert np.all(out['a'] == out2['a'])
assert np.all(out['b'] == out2['b'])
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t2], join_type='outer')
assert ('vstack requires masking' in str(err) or
'vstack unavailable' in str(err))
class TestHStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t3 = t_cls.read([' d e',
' 4. 7',
' 5. 8',
' 6. 9'], format='ascii')
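        # t3 has three rows while the other tables have two: outer hstack must
        # mask the short tables and join_type='exact' must fail.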
self.t4 = t_cls(self.t1, copy=True, masked=True)
self.t4['a'].name = 'f'
self.t4['b'].name = 'g'
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
('c', {'a': 1, 'b': 1, 'c': 1}),
('d', 1),
('a', 1),
('e', 1)])
def test_stack_same_table(self, operation_table_type):
"""
From #2995, test that hstack'ing references to the same table has the
expected output.
"""
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t1])
assert out.pformat() == ['a_1 b_1 a_2 b_2',
'--- --- --- ---',
'0.0 foo 0.0 foo',
'1.0 bar 1.0 bar']
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1[0], self.t2[1]])
assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
'--- --- --- --- ---',
'0.0 foo 3.0 sez 5']
def test_stack_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2['c']])
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert type(out['c']) is type(self.t2['c'])
assert out.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo 4',
'1.0 bar 5']
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2, self.t4], join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with catch_warnings() as w:
out = table.hstack([self.t1, self.t5], join_type='inner')
assert len(w) == 2
assert out.meta == self.t5.meta
with catch_warnings() as w:
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
assert len(w) == 2
assert out.meta == self.t5.meta
with catch_warnings() as w:
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
assert len(w) == 0
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.hstack([])
with pytest.raises(TypeError):
table.hstack(1)
with pytest.raises(TypeError):
table.hstack([self.t2, 1])
with pytest.raises(ValueError):
table.hstack([self.t1, self.t2], join_type='invalid join type')
def test_stack_basic(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t3 = self.t3
t4 = self.t4
out = table.hstack([t1, t2], join_type='inner')
assert out.masked is False
assert type(out) is operation_table_type
assert type(out['a_1']) is type(t1['a'])
assert type(out['b_1']) is type(t1['b'])
assert type(out['a_2']) is type(t2['a'])
assert type(out['b_2']) is type(t2['b'])
assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
'--- --- --- --- ---',
'0.0 foo 2.0 pez 4',
'1.0 bar 3.0 sez 5']
# stacking as a list gives same result
out_list = table.hstack([t1, t2], join_type='inner')
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2], join_type='outer')
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2, t3, t4], join_type='outer')
assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
'--- --- --- --- --- --- --- --- ---',
'0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
'1.0 bar 3.0 sez 5 5.0 8 1.0 bar',
' -- -- -- -- -- 6.0 9 -- --']
out = table.hstack([t1, t2, t3, t4], join_type='inner')
assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
'--- --- --- --- --- --- --- --- ---',
'0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
'1.0 bar 3.0 sez 5 5.0 8 1.0 bar']
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
# For join_type exact, which will fail here because n_rows
# does not match
with pytest.raises(TableMergeError):
table.hstack([self.t1, self.t3], join_type='exact')
def test_hstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = operation_table_type(t1, copy=True, masked=True)
t2.meta.clear()
t2['b'].mask[1] = True
assert table.hstack([t1, t2]).pformat() == ['a_1 b_1 a_2 b_2',
'--- --- --- ---',
'0.0 foo 0.0 foo',
'1.0 bar 1.0 --']
def test_table_col_rename(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2], join_type='inner',
uniq_col_name='{table_name}_{col_name}',
table_names=('left', 'right'))
assert out.masked is False
assert out.pformat() == ['left_a left_b right_a right_b c ',
'------ ------ ------- ------- ---',
' 0.0 foo 2.0 pez 4',
' 1.0 bar 3.0 sez 5']
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t3 = self.t3[:2]
t4 = self.t4
# Just set a bunch of meta and make sure it is the same in output
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
t1['a'].unit = 'cm'
t1['b'].info.description = 't1_b'
t4['f'].info.format = '%6s'
t1['b'].info.meta.update(meta1)
t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t3['d'].unit = 'm'
t3['d'].info.format = '%6s'
t3['d'].info.description = 't3_c'
with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
out = table.hstack([t1, t3, t4], join_type='exact')
assert len(warning_lines) == 0
for t in [t1, t3, t4]:
for name in t.colnames:
for attr in ('meta', 'unit', 'format', 'description'):
assert getattr(out[name].info, attr) == getattr(t[name].info, attr)
# Make sure we got a copy of meta, not ref
t1['b'].info.meta['b'] = None
assert out['b'].info.meta['b'] == [1, 2]
    def test_hstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
assert (self.t1 == table.hstack(self.t1)).all()
assert (self.t1 == table.hstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col1 = mixin_cols['m']
col2 = col1[2:4] # Shorter version of col1
t1 = table.QTable([col1])
t2 = table.QTable([col2])
cls_name = type(col1).__name__
out = table.hstack([t1, t2], join_type='inner')
assert type(out['col0_1']) is type(out['col0_2'])
assert len(out) == len(col2)
# Check that columns are as expected.
if cls_name == 'SkyCoord':
assert skycoord_equal(out['col0_1'], col1[:len(col2)])
assert skycoord_equal(out['col0_2'], col2)
else:
assert np.all(out['col0_1'] == col1[:len(col2)])
assert np.all(out['col0_2'] == col2)
# Time class supports masking, all other mixins do not
if cls_name == 'Time':
out = table.hstack([t1, t2], join_type='outer')
assert len(out) == len(t1)
assert np.all(out['col0_1'] == col1)
assert np.all(out['col0_2'][:len(col2)] == col2)
assert np.all(out['col0_2'].mask == [False, False, True, True])
# check directly stacking mixin columns:
out2 = table.hstack([t1, t2['col0']], join_type='outer')
assert np.all(out['col0_1'] == out2['col0_1'])
assert np.all(out['col0_2'] == out2['col0_2'])
else:
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type='outer')
assert 'hstack requires masking' in str(err)
def test_unique(operation_table_type):
t = operation_table_type.read(
[' a b c d',
' 2 b 7.0 0',
' 1 c 3.0 5',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 1 a 1.0 7',
' 2 b 5.0 1',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 c 3.0 5',
], format='ascii')
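    # The final row duplicates ' 1 c 3.0 5', so unique(t) equals the sorted
    # table without that last row.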
tu = operation_table_type(np.sort(t[:-1]))
t_all = table.unique(t)
assert sort_eq(t_all.pformat(), tu.pformat())
t_s = t.copy()
del t_s['b', 'c', 'd']
t_all = table.unique(t_s)
assert sort_eq(t_all.pformat(), [' a ',
'---',
' 0',
' 1',
' 2'])
key1 = 'a'
t1a = table.unique(t, key1)
assert sort_eq(t1a.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 c 3.0 5',
' 2 b 7.0 0'])
t1b = table.unique(t, key1, keep='last')
assert sort_eq(t1b.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 c 3.0 5',
' 2 b 5.0 1'])
t1c = table.unique(t, key1, keep='none')
assert sort_eq(t1c.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4'])
key2 = ['a', 'b']
t2a = table.unique(t, key2)
assert sort_eq(t2a.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 1.0 7',
' 1 c 3.0 5',
' 2 a 4.0 3',
' 2 b 7.0 0'])
t2b = table.unique(t, key2, keep='last')
assert sort_eq(t2b.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 c 3.0 5',
' 2 a 4.0 3',
' 2 b 5.0 1'])
t2c = table.unique(t, key2, keep='none')
assert sort_eq(t2c.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 2 a 4.0 3'])
key2 = ['a', 'a']
with pytest.raises(ValueError) as exc:
t2a = table.unique(t, key2)
assert exc.value.args[0] == "duplicate key names"
with pytest.raises(ValueError) as exc:
table.unique(t, key2, keep=True)
assert exc.value.args[0] == (
"'keep' should be one of 'first', 'last', 'none'")
t1_m = operation_table_type(t1a, masked=True)
t1_m['a'].mask[1] = True
with pytest.raises(ValueError) as exc:
t1_mu = table.unique(t1_m)
assert exc.value.args[0] == (
"cannot use columns with masked values as keys; "
"remove column 'a' from keys and rerun unique()")
t1_mu = table.unique(t1_m, silent=True)
assert t1_mu.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 2 b 7.0 0',
' -- c 3.0 5']
with pytest.raises(ValueError) as e:
t1_mu = table.unique(t1_m, silent=True, keys='a')
t1_m = operation_table_type(t, masked=True)
t1_m['a'].mask[1] = True
t1_m['d'].mask[3] = True
# Test that multiple masked key columns get removed in the correct
# order
t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True)
assert t1_mu.pformat() == [' a b c d ',
'--- --- --- ---',
' 2 a 4.0 --',
' 2 b 7.0 0',
' -- c 3.0 5']
def test_vstack_bytes(operation_table_type):
"""
Test for issue #5617 when vstack'ing bytes columns in Py3.
    This is really an upstream numpy issue numpy/numpy#8403.
"""
t = operation_table_type([[b'a']], names=['a'])
assert t['a'].itemsize == 1
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2['a'].itemsize == 1
def test_vstack_unicode():
"""
Test for problem related to issue #5617 when vstack'ing *unicode*
columns. In this case the character size gets multiplied by 4.
"""
t = table.Table([[u'a']], names=['a'])
assert t['a'].itemsize == 4 # 4-byte / char for U dtype
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2['a'].itemsize == 4
def test_get_out_class():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
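    # _get_out_class prefers MaskedColumn over Column, but mixing a Column
    # with a Quantity has no common output class and must raise.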
assert _get_out_class([c, mc]) is mc.__class__
assert _get_out_class([mc, c]) is mc.__class__
assert _get_out_class([c, c]) is c.__class__
assert _get_out_class([c]) is c.__class__
with pytest.raises(ValueError):
_get_out_class([c, q])
with pytest.raises(ValueError):
_get_out_class([q, c])
def test_masking_required_exception():
"""
Test that outer join, hstack and vstack fail for a mixin column which
does not support masking.
"""
col = [1, 2, 3, 4] * u.m
t1 = table.QTable([[1, 2, 3, 4], col], names=['a', 'b'])
t2 = table.QTable([[1, 2], col[:2]], names=['a', 'c'])
with pytest.raises(NotImplementedError) as err:
table.vstack([t1, t2], join_type='outer')
assert 'vstack requires masking' in str(err)
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type='outer')
assert 'hstack requires masking' in str(err)
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type='outer')
assert 'join requires masking' in str(err)
def test_stack_columns():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
time = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
sc = SkyCoord([1, 2], [3, 4], unit='deg')
cq = table.Column([11, 22], unit=u.m)
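    # Any Quantity in the inputs makes the result a QTable; any MaskedColumn
    # makes the result masked.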
t = table.hstack([c, q])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([q, c])
assert t.__class__ is table.QTable
assert t.masked is False
t = table.hstack([mc, q])
assert t.__class__ is table.QTable
assert t.masked is True
t = table.hstack([c, mc])
assert t.__class__ is table.Table
assert t.masked is True
t = table.vstack([q, q])
assert t.__class__ is table.QTable
t = table.vstack([c, c])
assert t.__class__ is table.Table
t = table.hstack([c, time])
assert t.__class__ is table.Table
t = table.hstack([c, sc])
assert t.__class__ is table.Table
t = table.hstack([q, time, sc])
assert t.__class__ is table.QTable
with pytest.raises(ValueError):
table.vstack([c, q])
with pytest.raises(ValueError):
t = table.vstack([q, cq])
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import pytest
import numpy as np
from astropy.tests.helper import assert_follows_unicode_guidelines, catch_warnings
from astropy import table
from astropy import units as u
class TestColumn():
def test_subclass(self, Column):
c = Column(name='a')
assert isinstance(c, np.ndarray)
c2 = c * 2
assert isinstance(c2, Column)
assert isinstance(c2, np.ndarray)
def test_numpy_ops(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
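        # c equals arr elementwise, so eq/ge/le are all True and
        # ne/gt/lt are all False.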
for op, test_equal in ((operator.eq, True),
(operator.ne, False),
(operator.ge, True),
(operator.gt, False),
(operator.le, True),
(operator.lt, False)):
for eq in (op(c, arr), op(arr, c)):
assert np.all(eq) if test_equal else not np.any(eq)
assert len(eq) == 3
if Column is table.Column:
assert type(eq) == np.ndarray
else:
assert type(eq) == np.ma.core.MaskedArray
assert eq.dtype.str == '|b1'
lt = c - 1 < arr
assert np.all(lt)
def test_numpy_boolean_ufuncs(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
for ufunc, test_true in ((np.isfinite, True),
(np.isinf, False),
(np.isnan, False),
(np.sign, True),
(np.signbit, False)):
result = ufunc(c)
assert len(result) == len(c)
assert np.all(result) if test_true else not np.any(result)
if Column is table.Column:
assert type(result) == np.ndarray
else:
assert type(result) == np.ma.core.MaskedArray
if ufunc is not np.sign:
assert result.dtype.str == '|b1'
def test_view(self, Column):
c = np.array([1, 2, 3], dtype=np.int64).view(Column)
assert repr(c) == "<{0} dtype='int64' length=3>\n1\n2\n3".format(Column.__name__)
def test_format(self, Column):
"""Show that the formatted output from str() works"""
from astropy import conf
with conf.set_temp('max_lines', 8):
c1 = Column(np.arange(2000), name='a', dtype=float,
format='%6.2f')
assert str(c1).splitlines() == [' a ',
'-------',
' 0.00',
' 1.00',
' ...',
'1998.00',
'1999.00',
'Length = 2000 rows']
def test_convert_numpy_array(self, Column):
d = Column([1, 2, 3], name='a', dtype='i8')
np_data = np.array(d)
assert np.all(np_data == d)
np_data = np.array(d, copy=False)
assert np.all(np_data == d)
np_data = np.array(d, dtype='i4')
assert np.all(np_data == d)
def test_convert_unit(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
d.convert_unit_to("km")
assert np.all(d.data == [0.001, 0.002, 0.003])
def test_array_wrap(self):
"""Test that the __array_wrap__ method converts a reduction ufunc
output that has a different shape into an ndarray view. Without this a
method call like c.mean() returns a Column array object with length=1."""
# Mean and sum for a 1-d float column
c = table.Column(name='a', data=[1., 2., 3.])
assert np.allclose(c.mean(), 2.0)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 6.)
assert isinstance(c.sum(), (np.floating, float))
# Non-reduction ufunc preserves Column class
assert isinstance(np.cos(c), table.Column)
# Sum for a 1-d int column
c = table.Column(name='a', data=[1, 2, 3])
assert np.allclose(c.sum(), 6)
assert isinstance(c.sum(), (np.integer, int))
# Sum for a 2-d int column
c = table.Column(name='a', data=[[1, 2, 3],
[4, 5, 6]])
assert c.sum() == 21
assert isinstance(c.sum(), (np.integer, int))
assert np.all(c.sum(axis=0) == [5, 7, 9])
assert c.sum(axis=0).shape == (3,)
assert isinstance(c.sum(axis=0), np.ndarray)
# Sum and mean for a 1-d masked column
c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1])
assert np.allclose(c.mean(), 1.5)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 3.)
assert isinstance(c.sum(), (np.floating, float))
def test_name_none(self, Column):
"""Can create a column without supplying name, which defaults to None"""
c = Column([1, 2])
assert c.name is None
assert np.all(c == np.array([1, 2]))
def test_quantity_init(self, Column):
c = Column(data=np.array([1, 2, 3]) * u.m)
assert np.all(c.data == np.array([1, 2, 3]))
assert np.all(c.unit == u.m)
c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm)
assert np.all(c.data == np.array([100, 200, 300]))
assert np.all(c.unit == u.cm)
def test_attrs_survive_getitem_after_change(self, Column):
"""
Test for issue #3023: when calling getitem with a MaskedArray subclass
the original object attributes are not copied.
"""
c1 = Column([1, 2, 3], name='a', unit='m', format='%i',
description='aa', meta={'a': 1})
c1.name = 'b'
c1.unit = 'km'
c1.format = '%d'
c1.description = 'bb'
c1.meta = {'bbb': 2}
for item in (slice(None, None), slice(None, 1), np.array([0, 2]),
np.array([False, True, False])):
c2 = c1[item]
assert c2.name == 'b'
assert c2.unit is u.km
assert c2.format == '%d'
assert c2.description == 'bb'
assert c2.meta == {'bbb': 2}
# Make sure that calling getitem resulting in a scalar does
# not copy attributes.
val = c1[1]
for attr in ('name', 'unit', 'format', 'description', 'meta'):
assert not hasattr(val, attr)
def test_to_quantity(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
assert np.all(d.quantity == ([1, 2, 3.] * u.m))
assert np.all(d.quantity.value == ([1, 2, 3.] * u.m).value)
assert np.all(d.quantity == d.to('m'))
assert np.all(d.quantity.value == d.to('m').value)
np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value,
[299.792458, 149.896229, 99.93081933])
d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None)
with pytest.raises(u.UnitsError):
d_nounit.to(u.km)
assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))
# make sure the correct copy/no copy behavior is happening
q = [1, 3, 5]*u.km
# to should always make a copy
d.to(u.km)[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
        # explicit copying of the quantity should not change the column
d.quantity.copy()[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# but quantity directly is a "view", accessing the underlying column
d.quantity[:] = q
np.testing.assert_allclose(d, [1000, 3000, 5000])
# view should also work for integers
d2 = Column([1, 2, 3], name='a', dtype=int, unit="m")
d2.quantity[:] = q
np.testing.assert_allclose(d2, [1000, 3000, 5000])
# but it should fail for strings or other non-numeric tables
d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m")
with pytest.raises(TypeError):
d3.quantity
def test_to_funcunit_quantity(self, Column):
"""
Tests for #8424, check if function-unit can be retrieved from column.
"""
d = Column([1, 2, 3], name='a', dtype="f8", unit="dex(AA)")
assert np.all(d.quantity == ([1, 2, 3] * u.dex(u.AA)))
assert np.all(d.quantity.value == ([1, 2, 3] * u.dex(u.AA)).value)
assert np.all(d.quantity == d.to("dex(AA)"))
assert np.all(d.quantity.value == d.to("dex(AA)").value)
# make sure, casting to linear unit works
q = [10, 100, 1000] * u.AA
np.testing.assert_allclose(d.to(u.AA), q)
def test_item_access_type(self, Column):
"""
Tests for #3095, which forces integer item access to always return a plain
ndarray or MaskedArray, even in the case of a multi-dim column.
"""
integer_types = (int, np.int_)
for int_type in integer_types:
c = Column([[1, 2], [3, 4]])
i0 = int_type(0)
i1 = int_type(1)
assert np.all(c[i0] == [1, 2])
assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray)
assert c[i0].shape == (2,)
c01 = c[i0:i1]
assert np.all(c01 == [[1, 2]])
assert isinstance(c01, Column)
assert c01.shape == (1, 2)
c = Column([1, 2])
assert np.all(c[i0] == 1)
assert isinstance(c[i0], np.integer)
assert c[i0].shape == ()
c01 = c[i0:i1]
assert np.all(c01 == [1])
assert isinstance(c01, Column)
assert c01.shape == (1,)
def test_insert_basic(self, Column):
c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1 == [0, 100, 1, 2])
assert c1.attrs_equal(c)
assert type(c) is type(c1)
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
c1 = c.insert(-1, 100)
assert np.all(c1 == [0, 1, 100, 2])
c1 = c.insert(3, 100)
assert np.all(c1 == [0, 1, 2, 100])
c1 = c.insert(-3, 100)
assert np.all(c1 == [100, 0, 1, 2])
c1 = c.insert(1, [100, 200, 300])
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
# Out of bounds index
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(-4, 100)
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(4, 100)
def test_insert_axis(self, Column):
"""Insert with non-default axis kwarg"""
c = Column([[1, 2], [3, 4]])
c1 = c.insert(1, [5, 6], axis=None)
assert np.all(c1 == [1, 5, 6, 2, 3, 4])
c1 = c.insert(1, [5, 6], axis=1)
assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])
def test_insert_multidim(self, Column):
c = Column([[1, 2],
[3, 4]], name='a', dtype=int)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == [[1, 2], [100, 200], [3, 4]])
# Broadcast
c1 = c.insert(1, 100)
assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])
# Wrong shape
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200, 300])
def test_insert_object(self, Column):
c = Column(['a', 1, None], name='a', dtype=object)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == ['a', [100, 200], 1, None])
def test_insert_masked(self):
c = table.MaskedColumn([0, 1, 2], name='a', fill_value=9999,
mask=[False, True, False])
# Basic insert
c1 = c.insert(1, 100)
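        # With no mask argument the inserted element defaults to unmasked.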
assert np.all(c1.data.data == [0, 100, 1, 2])
assert c1.fill_value == 9999
assert np.all(c1.data.mask == [False, False, True, False])
assert type(c) is type(c1)
for mask in (False, True):
c1 = c.insert(1, 100, mask=mask)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert np.all(c1.data.mask == [False, mask, True, False])
def test_insert_masked_multidim(self):
c = table.MaskedColumn([[1, 2],
[3, 4]], name='a', dtype=int)
c1 = c.insert(1, [100, 200], mask=True)
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])
c1 = c.insert(1, [100, 200], mask=[True, False])
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200], mask=[True, False, True])
def test_mask_on_non_masked_table(self):
"""
When table is not masked and trying to set mask on column then
it's Raise AttributeError.
"""
t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))
with pytest.raises(AttributeError):
t['a'].mask = [True, False]
class TestAttrEqual():
"""Bunch of tests originally from ATpy that test the attrs_equal method."""
def test_5(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy')
c2 = Column(name='a', dtype=int, unit='mJy')
assert c1.attrs_equal(c2)
def test_6(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
def test_7(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='b', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_8(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=float, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_9(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_10(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%g',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_11(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='another test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_12(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'e': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_13(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 9, 'd': 12})
assert not c1.attrs_equal(c2)
def test_col_and_masked_col(self):
c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
assert c2.attrs_equal(c1)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaColumn(MetaBaseTest):
test_class = table.Column
args = ()
class TestMetaMaskedColumn(MetaBaseTest):
test_class = table.MaskedColumn
args = ()
def test_getitem_metadata_regression():
"""
Regression test for #1471: MaskedArray does not call __array_finalize__ so
the meta-data was not getting copied over. By overloading _update_from we
are able to work around this bug.
"""
# Make sure that meta-data gets propagated with __getitem__
c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
c = table.MaskedColumn(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
# As above, but with take() - check the method and the function
c = table.Column(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.Column)
c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.MaskedColumn)
def test_unicode_guidelines():
arr = np.array([1, 2, 3])
c = table.Column(arr, name='a')
assert_follows_unicode_guidelines(c)
def test_scalar_column():
"""
Column is not designed to hold scalars, but for numpy 1.6 this can happen:
>> type(np.std(table.Column([1, 2])))
astropy.table.column.Column
"""
c = table.Column(1.5)
assert repr(c) == '1.5'
assert str(c) == '1.5'
def test_qtable_column_conversion():
"""
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
"""
qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])
assert isinstance(qtab['i'], table.column.Column)
assert isinstance(qtab['f'], table.column.Column)
qtab['i'].unit = 'km/s'
assert isinstance(qtab['i'], u.Quantity)
assert isinstance(qtab['f'], table.column.Column)
# should follow from the above, but good to make sure as a #4497 regression test
assert isinstance(qtab['i'][0], u.Quantity)
assert isinstance(qtab[0]['i'], u.Quantity)
assert not isinstance(qtab['f'][0], u.Quantity)
assert not isinstance(qtab[0]['f'], u.Quantity)
# Regression test for #5342: if a function unit is assigned, the column
# should become the appropriate FunctionQuantity subclass.
qtab['f'].unit = u.dex(u.cm/u.s**2)
assert isinstance(qtab['f'], u.Dex)
@pytest.mark.parametrize('masked', [True, False])
def test_string_truncation_warning(masked):
"""
Test warnings associated with in-place assignment to a string
column that results in truncation of the right hand side.
"""
t = table.Table([['aa', 'bb']], names=['a'], masked=masked)
with catch_warnings() as w:
from inspect import currentframe, getframeinfo
t['a'][1] = 'cc'
assert len(w) == 0
t['a'][:] = 'dd'
assert len(w) == 0
with catch_warnings() as w:
frameinfo = getframeinfo(currentframe())
t['a'][0] = 'eee' # replace item with string that gets truncated
assert t['a'][0] == 'ee'
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.StringTruncateWarning
assert 'test_column' in w[0].filename
with catch_warnings() as w:
t['a'][:] = ['ff', 'ggg'] # replace item with string that gets truncated
assert np.all(t['a'] == ['ff', 'gg'])
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
with catch_warnings() as w:
# Test the obscure case of assigning from an array that was originally
# wider than any of the current elements (i.e. dtype is U4 but actual
# elements are U1 at the time of assignment).
val = np.array(['ffff', 'gggg'])
val[:] = ['f', 'g']
t['a'][:] = val
assert np.all(t['a'] == ['f', 'g'])
assert len(w) == 0
def test_string_truncation_warning_masked():
"""
Test warnings associated with in-place assignment to a string
to a masked column, specifically where the right hand side
contains np.ma.masked.
"""
# Test for strings, but also cover assignment of np.ma.masked to
# int and float masked column setting. This was previously only
# covered in an unrelated io.ascii test (test_line_endings) which
# showed an unexpected difference between handling of str and numeric
# masked arrays.
for values in (['a', 'b'], [1, 2], [1.0, 2.0]):
mc = table.MaskedColumn(values)
with catch_warnings() as w:
mc[1] = np.ma.masked
assert len(w) == 0
assert np.all(mc.mask == [False, True])
mc[:] = np.ma.masked
assert len(w) == 0
assert np.all(mc.mask == [True, True])
mc = table.MaskedColumn(['aa', 'bb'])
with catch_warnings() as w:
mc[:] = [np.ma.masked, 'ggg'] # replace item with string that gets truncated
assert mc[1] == 'gg'
assert np.all(mc.mask == [True, False])
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
"""
Create a bytestring Column from strings (including unicode) in Py3.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = u'bä'
c = Column([uba, 'def'], dtype='S')
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes(Column):
"""
Create a bytestring Column from bytes and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = u'bä'
uba8 = uba.encode('utf-8')
c = Column([uba8, b'def'])
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'S'
# Array / list comparisons
assert np.all(c == [uba, 'def'])
ok = c == [uba8, b'def']
assert type(ok) is type(c.data)
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c == np.array([uba, u'def']))
assert np.all(c == np.array([uba8, b'def']))
# Scalar compare
cmps = (uba, uba8)
for cmp in cmps:
ok = c == cmp
assert type(ok) is type(c.data)
assert np.all(ok == [True, False])
def test_col_unicode_sandwich_unicode():
"""
Sanity check that Unicode Column behaves normally.
"""
# On Py2 the unicode must be ASCII-compatible, else the final test fails.
uba = u'bä'
uba8 = uba.encode('utf-8')
c = table.Column([uba, 'def'], dtype='U')
assert c[0] == uba
assert isinstance(c[:0], table.Column)
assert isinstance(c[0], str)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'U'
ok = c == [uba, 'def']
assert type(ok) == np.ndarray
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c != [uba8, b'def'])
def test_masked_col_unicode_sandwich():
"""
Create a bytestring MaskedColumn and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
c = table.MaskedColumn([b'abc', b'def'])
c[1] = np.ma.masked
assert isinstance(c[:0], table.MaskedColumn)
assert isinstance(c[0], str)
assert c[0] == 'abc'
assert c[1] is np.ma.masked
assert isinstance(c[:], table.MaskedColumn)
assert c[:].dtype.char == 'S'
ok = c == ['abc', 'def']
assert ok[0] == True
assert ok[1] is np.ma.masked
assert np.all(c == [b'abc', b'def'])
assert np.all(c == np.array([u'abc', u'def']))
assert np.all(c == np.array([b'abc', b'def']))
for cmp in (u'abc', b'abc'):
ok = c == cmp
assert type(ok) is np.ma.MaskedArray
assert ok[0] == True
assert ok[1] is np.ma.masked
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_unicode_sandwich_set(Column):
"""
Test setting
"""
uba = u'bä'
c = Column([b'abc', b'def'])
c[0] = b'aa'
assert np.all(c == [u'aa', u'def'])
c[0] = uba # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding
assert np.all(c == [uba, u'def'])
assert c.pformat() == [u'None', u'----', ' ' + uba, u' def']
c[:] = b'cc'
assert np.all(c == [u'cc', u'cc'])
c[:] = uba
assert np.all(c == [uba, uba])
c[:] = ''
c[:] = [uba, b'def']
assert np.all(c == [uba, b'def'])
@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column])
@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list])
def test_unicode_sandwich_compare(class1, class2):
"""Test that comparing a bytestring Column/MaskedColumn with various
str (unicode) object types gives the expected result. Tests #6838.
"""
obj1 = class1([b'a', b'c'])
if class2 is str:
obj2 = 'a'
elif class2 is list:
obj2 = ['a', 'b']
else:
obj2 = class2(['a', 'b'])
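    # Comparisons broadcast obj2 (scalar, list, or column) elementwise
    # against the two-element bytestring column obj1.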
assert np.all((obj1 == obj2) == [True, False])
assert np.all((obj2 == obj1) == [True, False])
assert np.all((obj1 != obj2) == [False, True])
assert np.all((obj2 != obj1) == [False, True])
assert np.all((obj1 > obj2) == [False, True])
assert np.all((obj2 > obj1) == [False, False])
assert np.all((obj1 <= obj2) == [True, False])
assert np.all((obj2 <= obj1) == [True, True])
assert np.all((obj1 < obj2) == [False, False])
assert np.all((obj2 < obj1) == [False, True])
assert np.all((obj1 >= obj2) == [True, True])
assert np.all((obj2 >= obj1) == [True, False])
def test_unicode_sandwich_masked_compare():
"""Test the fix for #6839 from #6899."""
c1 = table.MaskedColumn(['a', 'b', 'c', 'd'],
mask=[True, False, True, False])
c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'],
mask=[True, True, False, False])
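    # Only index 3 is unmasked in both columns; every other elementwise
    # comparison yields np.ma.masked.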
for cmp in ((c1 == c2), (c2 == c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert cmp[3]
for cmp in ((c1 != c2), (c2 != c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert not cmp[3]
# Note: comparisons <, >, >=, <= fail to return a masked array entirely,
# see https://github.com/numpy/numpy/issues/10092.
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import pytest
import numpy as np
from astropy import table
from astropy.table import Row
from astropy import units as u
from .conftest import MaskedTable
def test_masked_row_with_object_col():
"""
Numpy < 1.8 has a bug in masked array that prevents access a row if there is
a column with object type.
"""
t = table.Table([[1]], dtype=['O'], masked=True)
t['col0'].mask = False
assert t[0]['col0'] == 1
t['col0'].mask = True
assert t[0]['col0'] is np.ma.masked
@pytest.mark.usefixtures('table_types')
class TestRow():
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def t(self):
# py.test wants to run this method once before table_types is run
# to set Table and Column. In this case just return None, which would
# cause any downstream test to fail if this happened in any other context.
if self._column_type is None:
return None
if not hasattr(self, '_t'):
a = self._column_type(name='a', data=[1, 2, 3], dtype='i8')
b = self._column_type(name='b', data=[4, 5, 6], dtype='i8')
self._t = self._table_type([a, b])
return self._t
def test_subclass(self, table_types):
"""Row is subclass of ndarray and Row"""
self._setup(table_types)
c = Row(self.t, 2)
assert isinstance(c, Row)
def test_values(self, table_types):
"""Row accurately reflects table values and attributes"""
self._setup(table_types)
table = self.t
row = table[1]
assert row['a'] == 2
assert row['b'] == 5
assert row[0] == 2
assert row[1] == 5
assert row.meta is table.meta
assert row.colnames == table.colnames
assert row.columns is table.columns
with pytest.raises(IndexError):
row[2]
if sys.byteorder == 'little':
assert str(row.dtype) == "[('a', '<i8'), ('b', '<i8')]"
else:
assert str(row.dtype) == "[('a', '>i8'), ('b', '>i8')]"
def test_ref(self, table_types):
"""Row is a reference into original table data"""
self._setup(table_types)
table = self.t
row = table[1]
row['a'] = 10
if table_types.Table is not MaskedTable:
assert table['a'][1] == 10
def test_left_equal(self, table_types):
"""Compare a table row to the corresponding structured array row"""
self._setup(table_types)
np_t = self.t.as_array()
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0]
else:
for row, np_row in zip(self.t, np_t):
assert np.all(row == np_row)
def test_left_not_equal(self, table_types):
"""Compare a table row to the corresponding structured array row"""
self._setup(table_types)
np_t = self.t.as_array()
np_t['a'] = [0, 0, 0]
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0]
else:
for row, np_row in zip(self.t, np_t):
assert np.all(row != np_row)
def test_right_equal(self, table_types):
"""Test right equal"""
self._setup(table_types)
np_t = self.t.as_array()
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0]
else:
for row, np_row in zip(self.t, np_t):
assert np.all(np_row == row)
def test_convert_numpy_array(self, table_types):
self._setup(table_types)
d = self.t[1]
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_void())
assert np_data is not d.as_void()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_void())
assert np_data is not d.as_void()
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
            np_data = np.array(d, dtype=[('c', 'i8'), ('d', 'i8')])
def test_format_row(self, table_types):
"""Test formatting row"""
self._setup(table_types)
table = self.t
row = table[0]
assert repr(row).splitlines() == ['<{0} {1}{2}>'
.format(row.__class__.__name__,
'index=0',
' masked=True' if table.masked else ''),
' a b ',
'int64 int64',
'----- -----',
' 1 4']
assert str(row).splitlines() == [' a b ',
'--- ---',
' 1 4']
assert row._repr_html_().splitlines() == ['<i>{0} {1}{2}</i>'
.format(row.__class__.__name__,
'index=0',
' masked=True' if table.masked else ''),
'<table id="table{0}">'.format(id(table)),
'<thead><tr><th>a</th><th>b</th></tr></thead>',
'<thead><tr><th>int64</th><th>int64</th></tr></thead>',
'<tr><td>1</td><td>4</td></tr>',
'</table>']
def test_as_void(self, table_types):
"""Test the as_void() method"""
self._setup(table_types)
table = self.t
row = table[0]
# If masked then with no masks, issue numpy/numpy#483 should come
# into play. Make sure as_void() code is working.
row_void = row.as_void()
if table.masked:
assert isinstance(row_void, np.ma.mvoid)
else:
assert isinstance(row_void, np.void)
assert row_void['a'] == 1
assert row_void['b'] == 4
# Confirm row is a view of table but row_void is not.
table['a'][0] = -100
assert row['a'] == -100
assert row_void['a'] == 1
# Make sure it works for a table that has masked elements
if table.masked:
table['a'].mask = True
# row_void is not a view, need to re-make
assert row_void['a'] == 1
row_void = row.as_void() # but row is a view
assert row['a'] is np.ma.masked
def test_row_and_as_void_with_objects(self, table_types):
"""Test the deprecated data property and as_void() method"""
t = table_types.Table([[{'a': 1}, {'b': 2}]], names=('a',))
assert t[0][0] == {'a': 1}
assert t[0]['a'] == {'a': 1}
assert t[0].as_void()[0] == {'a': 1}
assert t[0].as_void()['a'] == {'a': 1}
def test_bounds_checking(self, table_types):
"""Row gives index error upon creation for out-of-bounds index"""
self._setup(table_types)
for ibad in (-5, -4, 3, 4):
with pytest.raises(IndexError):
self.t[ibad]
def test_row_tuple_column_slice():
"""
Test getting and setting a row using a tuple or list of column names
"""
t = table.QTable([[1, 2, 3] * u.m,
[10., 20., 30.],
[100., 200., 300.],
['x', 'y', 'z']], names=['a', 'b', 'c', 'd'])
# Get a row for index=1
r1 = t[1]
# Column slice with tuple of col names
r1_abc = r1['a', 'b', 'c'] # Row object for these cols
r1_abc_repr = ['<Row index=1>',
' a b c ',
' m ',
'float64 float64 float64',
'------- ------- -------',
' 2.0 20.0 200.0']
assert repr(r1_abc).splitlines() == r1_abc_repr
# Column slice with list of col names
r1_abc = r1[['a', 'b', 'c']]
assert repr(r1_abc).splitlines() == r1_abc_repr
# Make sure setting on a tuple or slice updates parent table and row
r1['c'] = 1000
r1['a', 'b'] = 1000 * u.cm, 100.
assert r1['a'] == 10 * u.m
assert r1['b'] == 100
assert t['a'][1] == 10 * u.m
assert t['b'][1] == 100.
assert t['c'][1] == 1000
# Same but using a list of column names instead of tuple
r1[['a', 'b']] = 2000 * u.cm, 200.
assert r1['a'] == 20 * u.m
assert r1['b'] == 200
assert t['a'][1] == 20 * u.m
assert t['b'][1] == 200.
# Set column slice of column slice
r1_abc['a', 'c'] = -1 * u.m, -10
assert t['a'][1] == -1 * u.m
assert t['b'][1] == 200.
assert t['c'][1] == -10.
# Bad column name
with pytest.raises(KeyError) as err:
t[1]['a', 'not_there']
assert "KeyError: 'not_there'" in str(err)
# Too many values
with pytest.raises(ValueError) as err:
t[1]['a', 'b'] = 1 * u.m, 2, 3
assert 'right hand side must be a sequence' in str(err)
# Something without a length
with pytest.raises(ValueError) as err:
t[1]['a', 'b'] = 1
assert 'right hand side must be a sequence' in str(err)
def test_row_tuple_column_slice_transaction():
"""
Test that setting a row that fails part way through does not
change the table at all.
"""
t = table.QTable([[10., 20., 30.],
[1, 2, 3] * u.m], names=['a', 'b'])
tc = t.copy()
# First one succeeds but second fails.
with pytest.raises(ValueError) as err:
t[1]['a', 'b'] = (-1, -1 * u.s) # Bad unit
assert "'s' (time) and 'm' (length) are not convertible" in str(err)
assert t[1] == tc[1]
def test_uint_indexing():
"""
Test that accessing a row with an unsigned integer
works as with a signed integer. Similarly tests
that printing such a row works.
This is non-trivial: adding a signed and unsigned
integer in numpy results in a float, which is an
invalid slice index.
Regression test for gh-7464.
"""
t = table.Table([[1., 2., 3.]], names='a')
assert t['a'][1] == 2.
assert t['a'][np.int(1)] == 2.
assert t['a'][np.uint(1)] == 2.
assert t[np.uint(1)]['a'] == 2.
trepr = ['<Row index=1>',
' a ',
'float64',
'-------',
' 2.0']
assert repr(t[1]).splitlines() == trepr
assert repr(t[np.int(1)]).splitlines() == trepr
assert repr(t[np.uint(1)]).splitlines() == trepr
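
# A minimal sketch of the promotion rule described in test_uint_indexing
# above (the helper name _demo_uint_promotion is illustrative, not an
# astropy API): mixing signed and unsigned 64-bit integers promotes to
# float64, and a float is an invalid array index.
def _demo_uint_promotion():
    promoted = np.uint64(1) + np.int64(1)  # numpy promotes this sum to float64
    assert promoted.dtype == np.dtype(np.float64)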
|
06daac30e58c6d74140d770841992222684cd2fb85c174eff7776542af0e5a89 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
try:
import h5py # pylint: disable=W0611
except ImportError:
HAS_H5PY = False
else:
HAS_H5PY = True
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
import copy
import pickle
from io import StringIO
import pytest
import numpy as np
from astropy.coordinates import EarthLocation
from astropy.table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin
from astropy.table import serialize
from astropy import time
from astropy import coordinates
from astropy import units as u
from astropy.table.column import BaseColumn
from astropy.table import table_helpers
from .conftest import MIXIN_COLS
def test_attributes(mixin_cols):
"""
Required attributes for a column can be set.
"""
m = mixin_cols['m']
m.info.name = 'a'
assert m.info.name == 'a'
m.info.description = 'a'
assert m.info.description == 'a'
# Cannot set unit for these classes
if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time)):
with pytest.raises(AttributeError):
m.info.unit = u.m
else:
m.info.unit = u.m
assert m.info.unit is u.m
m.info.format = 'a'
assert m.info.format == 'a'
m.info.meta = {'a': 1}
assert m.info.meta == {'a': 1}
with pytest.raises(AttributeError):
m.info.bad_attr = 1
with pytest.raises(AttributeError):
m.info.bad_attr
def check_mixin_type(table, table_col, in_col):
# We check for QuantityInfo rather than just isinstance(col, u.Quantity)
# since we want to treat EarthLocation as a mixin, even though it is
# a Quantity subclass.
if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable)
or isinstance(in_col, Column)):
assert type(table_col) is table.ColumnClass
else:
assert type(table_col) is type(in_col)
# Make sure in_col got copied and creating table did not touch it
assert in_col.info.name is None
def test_make_table(table_types, mixin_cols):
"""
Make a table with the columns in mixin_cols, which is an ordered dict of
    four cols: 'i', 'a', and 'b' are table_types.Column type, and 'm' is a mixin.
"""
t = table_types.Table(mixin_cols)
check_mixin_type(t, t['m'], mixin_cols['m'])
cols = list(mixin_cols.values())
t = table_types.Table(cols, names=('i', 'a', 'b', 'm'))
check_mixin_type(t, t['m'], mixin_cols['m'])
t = table_types.Table(cols)
check_mixin_type(t, t['col3'], mixin_cols['m'])
def test_io_ascii_write():
"""
Test that table with mixin column can be written by io.ascii for
every pure Python writer. No validation of the output is done,
this just confirms no exceptions.
"""
from astropy.io.ascii.connect import _get_connectors_table
t = QTable(MIXIN_COLS)
for fmt in _get_connectors_table():
if fmt['Format'] == 'ascii.ecsv' and not HAS_YAML:
continue
if fmt['Write'] and '.fast_' not in fmt['Format']:
out = StringIO()
t.write(out, format=fmt['Format'])
def test_votable_quantity_write(tmpdir):
"""
Test that table with Quantity mixin column can be round-tripped by
io.votable. Note that FITS and HDF5 mixin support are tested (much more
thoroughly) in their respective subpackage tests
(io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
"""
t = QTable()
t['a'] = u.Quantity([1, 2, 4], unit='Angstrom')
filename = str(tmpdir.join('table-tmp'))
t.write(filename, format='votable', overwrite=True)
qt = QTable.read(filename, format='votable')
assert isinstance(qt['a'], u.Quantity)
assert qt['a'].unit == 'Angstrom'
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_standard(tmpdir, table_types):
"""
    Test that a table with Time mixin columns can be written by io.fits and
    that the output can be partially round-tripped (metadata scale,
    location), with validation of the output.
Note that we postpone checking the "local" scale, since that cannot
be done with format 'cxcsec', as it requires an epoch.
"""
t = table_types([[1, 2], ['string', 'column']])
for scale in time.STANDARD_TIME_SCALES:
t['a'+scale] = time.Time([[1, 2], [3, 4]], format='cxcsec',
scale=scale, location=EarthLocation(
-2446354, 4237210, 4077985, unit='m'))
t['b'+scale] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale=scale)
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits', astropy_native=True)
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_local(tmpdir, table_types):
"""
Test that table with a Time mixin with scale local can also be written
by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding
``cxcsec`` format, which requires an epoch and thus cannot be used for a
local time scale.
"""
t = table_types([[1, 2], ['string', 'column']])
t['a_local'] = time.Time([[50001, 50002], [50003, 50004]],
format='mjd', scale='local',
location=EarthLocation(-2446354, 4237210, 4077985,
unit='m'))
t['b_local'] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale='local')
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits', astropy_native=True)
for ab in ('a', 'b'):
name = ab + '_local'
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format.
for ab in ('a', 'b'):
name = ab + '_local'
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for ab in ('a', 'b'):
name = ab + '_local'
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
def test_votable_mixin_write_fail(mixin_cols):
"""
Test that table with mixin columns (excluding Quantity) cannot be written by
io.votable.
"""
t = QTable(mixin_cols)
# Only do this test if there are unsupported column types (i.e. anything besides
# BaseColumn and Quantity class instances).
unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity))
if not unsupported_cols:
pytest.skip("no unsupported column types")
out = StringIO()
with pytest.raises(ValueError) as err:
t.write(out, format='votable')
assert 'cannot write table with mixin column(s)' in str(err.value)
def test_join(table_types):
"""
Join tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['a'] = table_types.Column(['a', 'b', 'b', 'c'])
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t2 = table_types.Table(t1)
t2['a'] = ['b', 'c', 'a', 'd']
for name, col in MIXIN_COLS.items():
t1[name].info.description = name
t2[name].info.description = name + '2'
for join_type in ('inner', 'left'):
t12 = join(t1, t2, keys='a', join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
assert t12[name1].info.description == name
assert t12[name2].info.description == name + '2'
for join_type in ('outer', 'right'):
with pytest.raises(NotImplementedError) as exc:
t12 = join(t1, t2, keys='a', join_type=join_type)
assert 'join requires masking column' in str(exc.value)
with pytest.raises(ValueError) as exc:
t12 = join(t1, t2, keys=['a', 'skycoord'])
assert 'not allowed as a key column' in str(exc.value)
# Join does work for a mixin which is a subclass of np.ndarray
t12 = join(t1, t2, keys=['quantity'])
assert np.all(t12['a_1'] == t1['a'])
def test_hstack(table_types):
"""
Hstack tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t1[name].info.description = name
t1[name].info.meta = {'a': 1}
for join_type in ('inner', 'outer'):
for chop in (True, False):
t2 = table_types.Table(t1)
if chop:
t2 = t2[:-1]
if join_type == 'outer':
with pytest.raises(NotImplementedError) as exc:
t12 = hstack([t1, t2], join_type=join_type)
assert 'hstack requires masking column' in str(exc.value)
continue
t12 = hstack([t1, t2], join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
for attr in ('description', 'meta'):
assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr)
assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr)
def assert_table_name_col_equal(t, name, col):
"""
Assert all(t[name] == col), with special handling for known mixin cols.
"""
if isinstance(col, coordinates.SkyCoord):
assert np.all(t[name].ra == col.ra)
assert np.all(t[name].dec == col.dec)
elif isinstance(col, u.Quantity):
if type(t) is QTable:
assert np.all(t[name] == col)
elif isinstance(col, table_helpers.ArrayWrapper):
assert np.all(t[name].data == col.data)
else:
assert np.all(t[name] == col)
def test_get_items(mixin_cols):
"""
    Test that slicing / indexing a table gives the right values and that column attrs are inherited
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
for item in ([1, 3], np.array([0, 2]), slice(1, 3)):
t2 = t[item]
m2 = m[item]
assert_table_name_col_equal(t2, 'm', m[item])
for attr in attrs:
assert getattr(t2['m'].info, attr) == getattr(m.info, attr)
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_info_preserved_pickle_copy_init(mixin_cols):
"""
    Test that copy, pickle, and init-from-class round trips preserve info. This
tests not only the mixin classes but a regular column as well.
"""
def pickle_roundtrip(c):
return pickle.loads(pickle.dumps(c))
def init_from_class(c):
return c.__class__(c)
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
for colname in ('i', 'm'):
m = mixin_cols[colname]
m.info.name = colname
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
m2 = func(m)
for attr in attrs:
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_add_column(mixin_cols):
"""
Test that adding a column preserves values and attributes
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
assert m.info.name is None
    # Make sure adding the column in various ways doesn't touch the input column's info
t = QTable([m], names=['a'])
assert m.info.name is None
t['new'] = m
assert m.info.name is None
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
# Add columns m2, m3, m4 by two different methods and test expected equality
t['m2'] = m
m.info.name = 'm3'
t.add_columns([m], copy=True)
m.info.name = 'm4'
t.add_columns([m], copy=False)
for name in ('m2', 'm3', 'm4'):
assert_table_name_col_equal(t, name, m)
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t[name].info, attr)
# Also check that one can set using a scalar.
s = m[0]
if type(s) is type(m):
# We're not going to worry about testing classes for which scalars
        # are of a different class than the real array (and thus lose info, etc.)
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
    # While we're at it, also check a length-1 table.
t = QTable([m[1:2]], names=['m'])
if type(s) is type(m):
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
def test_vstack():
"""
Vstack tables with mixin cols.
"""
t1 = QTable(MIXIN_COLS)
t2 = QTable(MIXIN_COLS)
with pytest.raises(NotImplementedError):
vstack([t1, t2])
def test_insert_row(mixin_cols):
"""
    Test inserting a row, which only works for BaseColumn, Quantity, and Time
"""
t = QTable(mixin_cols)
t['m'].info.description = 'd'
if isinstance(t['m'], (u.Quantity, Column, time.Time)):
t.insert_row(1, t[-1])
assert t[1] == t[-1]
assert t['m'].info.description == 'd'
else:
with pytest.raises(ValueError) as exc:
t.insert_row(1, t[-1])
assert "Unable to insert row" in str(exc.value)
def test_insert_row_bad_unit():
"""
Insert a row into a QTable with the wrong unit
"""
t = QTable([[1] * u.m])
with pytest.raises(ValueError) as exc:
t.insert_row(0, (2 * u.m / u.s,))
assert "'m / s' (speed) and 'm' (length) are not convertible" in str(exc.value)
def test_convert_np_array(mixin_cols):
"""
    Test that converting to a numpy array preserves the column's dtype kind,
    with mixins that have no dtype of their own becoming object arrays.
"""
t = QTable(mixin_cols)
ta = t.as_array()
m = mixin_cols['m']
dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O'
assert ta['m'].dtype.kind == dtype_kind
def test_assignment_and_copy():
"""
Test that assignment of an int, slice, and fancy index works.
Along the way test that copying table works.
"""
for name in ('quantity', 'arraywrap'):
m = MIXIN_COLS[name]
t0 = QTable([m], names=['m'])
for i0, i1 in ((1, 2),
(slice(0, 2), slice(1, 3)),
(np.array([1, 2]), np.array([2, 3]))):
t = t0.copy()
t['m'][i0] = m[i1]
if name == 'arraywrap':
assert np.all(t['m'].data[i0] == m.data[i1])
assert np.all(t0['m'].data[i0] == m.data[i0])
assert np.all(t0['m'].data[i0] != t['m'].data[i0])
else:
assert np.all(t['m'][i0] == m[i1])
assert np.all(t0['m'][i0] == m[i0])
assert np.all(t0['m'][i0] != t['m'][i0])
def test_conversion_qtable_table():
"""
Test that a table round trips from QTable => Table => QTable
"""
qt = QTable(MIXIN_COLS)
names = qt.colnames
for name in names:
qt[name].info.description = name
t = Table(qt)
for name in names:
assert t[name].info.description == name
if name == 'quantity':
assert np.all(t['quantity'] == qt['quantity'].value)
            assert t['quantity'].unit is qt['quantity'].unit
assert isinstance(t['quantity'], t.ColumnClass)
else:
assert_table_name_col_equal(t, name, qt[name])
qt2 = QTable(qt)
for name in names:
assert qt2[name].info.description == name
assert_table_name_col_equal(qt2, name, qt[name])
def test_setitem_as_column_name():
"""
Test for mixin-related regression described in #3321.
"""
t = Table()
t['a'] = ['x', 'y']
t['b'] = 'b' # Previously was failing with KeyError
assert np.all(t['a'] == ['x', 'y'])
assert np.all(t['b'] == ['b', 'b'])
def test_quantity_representation():
"""
    Test that the table representation of quantities shows the unit in the
    header row rather than attached to each value
"""
t = QTable([[1, 2] * u.m])
assert t.pformat() == ['col0',
' m ',
'----',
' 1.0',
' 2.0']
def test_skycoord_representation():
"""
Test that skycoord representation works, both in the way that the
values are output and in changing the frame representation.
"""
# With no unit we get "None" in the unit row
c = coordinates.SkyCoord([0], [1], [0], representation_type='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
'None,None,None',
'--------------',
' 0.0,1.0,0.0']
# Test that info works with a dynamically changed representation
c = coordinates.SkyCoord([0], [1], [0], unit='m', representation_type='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
' m,m,m ',
'-----------',
'0.0,1.0,0.0']
t['col0'].representation_type = 'unitspherical'
assert t.pformat() == [' col0 ',
'deg,deg ',
'--------',
'90.0,0.0']
t['col0'].representation_type = 'cylindrical'
assert t.pformat() == [' col0 ',
' m,deg,m ',
'------------',
'1.0,90.0,0.0']
def test_ndarray_mixin():
"""
    Test directly adding various plain structured arrays into a table,
    instead of viewing them as NdarrayMixin first. Once added, each is
    viewed as an NdarrayMixin, so all the previous tests apply.
"""
a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')],
                 dtype='<i4,|U1')
b = np.array([(10, 'aa'), (20, 'bb'), (30, 'cc'), (40, 'dd')],
                 dtype=[('x', 'i4'), ('y', 'U2')])
c = np.rec.fromrecords([(100, 'raa'), (200, 'rbb'), (300, 'rcc'), (400, 'rdd')],
names=['rx', 'ry'])
d = np.arange(8).reshape(4, 2).view(NdarrayMixin)
# Add one during initialization and the next as a new column.
t = Table([a], names=['a'])
t['b'] = b
t['c'] = c
t['d'] = d
assert isinstance(t['a'], NdarrayMixin)
assert t['a'][1][1] == a[1][1]
assert t['a'][2][0] == a[2][0]
assert t[1]['a'][1] == a[1][1]
assert t[2]['a'][0] == a[2][0]
assert isinstance(t['b'], NdarrayMixin)
assert t['b'][1]['x'] == b[1]['x']
assert t['b'][1]['y'] == b[1]['y']
assert t[1]['b']['x'] == b[1]['x']
assert t[1]['b']['y'] == b[1]['y']
assert isinstance(t['c'], NdarrayMixin)
assert t['c'][1]['rx'] == c[1]['rx']
assert t['c'][1]['ry'] == c[1]['ry']
assert t[1]['c']['rx'] == c[1]['rx']
assert t[1]['c']['ry'] == c[1]['ry']
assert isinstance(t['d'], NdarrayMixin)
assert t['d'][1][0] == d[1][0]
assert t['d'][1][1] == d[1][1]
assert t[1]['d'][0] == d[1][0]
assert t[1]['d'][1] == d[1][1]
assert t.pformat() == [' a b c d [2] ',
'-------- ---------- ------------ ------',
"(1, 'a') (10, 'aa') (100, 'raa') 0 .. 1",
"(2, 'b') (20, 'bb') (200, 'rbb') 2 .. 3",
"(3, 'c') (30, 'cc') (300, 'rcc') 4 .. 5",
"(4, 'd') (40, 'dd') (400, 'rdd') 6 .. 7"]
def test_possible_string_format_functions():
"""
The QuantityInfo info class for Quantity implements a
possible_string_format_functions() method that overrides the
standard pprint._possible_string_format_functions() function.
Test this.
"""
t = QTable([[1, 2] * u.m])
t['col0'].info.format = '%.3f'
assert t.pformat() == [' col0',
' m ',
'-----',
'1.000',
'2.000']
t['col0'].info.format = 'hi {:.3f}'
assert t.pformat() == [' col0 ',
' m ',
'--------',
'hi 1.000',
'hi 2.000']
t['col0'].info.format = '.4f'
assert t.pformat() == [' col0 ',
' m ',
'------',
'1.0000',
'2.0000']
def test_rename_mixin_columns(mixin_cols):
"""
Rename a mixin column.
"""
t = QTable(mixin_cols)
tc = t.copy()
t.rename_column('m', 'mm')
assert t.colnames == ['i', 'a', 'b', 'mm']
if isinstance(t['mm'], table_helpers.ArrayWrapper):
assert np.all(t['mm'].data == tc['m'].data)
elif isinstance(t['mm'], coordinates.SkyCoord):
assert np.all(t['mm'].ra == tc['m'].ra)
assert np.all(t['mm'].dec == tc['m'].dec)
else:
assert np.all(t['mm'] == tc['m'])
def test_represent_mixins_as_columns_unit_fix():
"""
    Serializing a column whose unit is invalid used to raise an exception.
    Fixed in #7481.
"""
t = Table({'a': [1, 2]}, masked=True)
t['a'].unit = 'not a valid unit'
t['a'].mask[1] = True
serialize.represent_mixins_as_columns(t)
|
858468764a1876e888166ed1fc6b9a911df0b4aa47bcda5770e768eb8804104b | # This Python file uses the following encoding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import table
from astropy.table import Table, QTable
from astropy.table.table_helpers import simple_table
from astropy import units as u
from astropy.utils import console
BIG_WIDE_ARR = np.arange(2000, dtype=np.float64).reshape(100, 20)
SMALL_ARR = np.arange(18, dtype=np.int64).reshape(6, 3)
@pytest.mark.usefixtures('table_type')
class TestMultiD():
def test_multidim(self, table_type):
"""Test printing with multidimensional column"""
arr = [np.array([[1, 2],
[10, 20]], dtype=np.int64),
np.array([[3, 4],
[30, 40]], dtype=np.int64),
np.array([[5, 6],
[50, 60]], dtype=np.int64)]
t = table_type(arr)
lines = t.pformat()
assert lines == ['col0 [2] col1 [2] col2 [2]',
'-------- -------- --------',
' 1 .. 2 3 .. 4 5 .. 6',
'10 .. 20 30 .. 40 50 .. 60']
lines = t.pformat(html=True)
assert lines == ['<table id="table{id}">'.format(id=id(t)),
'<thead><tr><th>col0 [2]</th><th>col1 [2]</th><th>col2 [2]</th></tr></thead>',
'<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>',
'<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>',
'</table>']
nbclass = table.conf.default_notebook_table_class
assert t._repr_html_().splitlines() == [
'<i>{0} masked={1} length=2</i>'.format(table_type.__name__, t.masked),
'<table id="table{id}" class="{nbclass}">'.format(id=id(t), nbclass=nbclass),
'<thead><tr><th>col0 [2]</th><th>col1 [2]</th><th>col2 [2]</th></tr></thead>',
'<thead><tr><th>int64</th><th>int64</th><th>int64</th></tr></thead>',
'<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>',
'<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>',
'</table>']
t = table_type([arr])
lines = t.pformat()
assert lines == ['col0 [2,2]',
'----------',
' 1 .. 20',
' 3 .. 40',
' 5 .. 60']
def test_fake_multidim(self, table_type):
"""Test printing with 'fake' multidimensional column"""
arr = [np.array([[(1,)],
[(10,)]], dtype=np.int64),
np.array([[(3,)],
[(30,)]], dtype=np.int64),
np.array([[(5,)],
[(50,)]], dtype=np.int64)]
t = table_type(arr)
lines = t.pformat()
assert lines == ['col0 [1,1] col1 [1,1] col2 [1,1]',
'---------- ---------- ----------',
' 1 3 5',
' 10 30 50']
lines = t.pformat(html=True)
assert lines == ['<table id="table{id}">'.format(id=id(t)),
'<thead><tr><th>col0 [1,1]</th><th>col1 [1,1]</th><th>col2 [1,1]</th></tr></thead>',
'<tr><td>1</td><td>3</td><td>5</td></tr>',
'<tr><td>10</td><td>30</td><td>50</td></tr>',
'</table>']
nbclass = table.conf.default_notebook_table_class
assert t._repr_html_().splitlines() == [
'<i>{0} masked={1} length=2</i>'.format(table_type.__name__, t.masked),
'<table id="table{id}" class="{nbclass}">'.format(id=id(t), nbclass=nbclass),
'<thead><tr><th>col0 [1,1]</th><th>col1 [1,1]</th><th>col2 [1,1]</th></tr></thead>',
'<thead><tr><th>int64</th><th>int64</th><th>int64</th></tr></thead>',
'<tr><td>1</td><td>3</td><td>5</td></tr>', u'<tr><td>10</td><td>30</td><td>50</td></tr>',
'</table>']
t = table_type([arr])
lines = t.pformat()
assert lines == ['col0 [2,1,1]',
'------------',
' 1 .. 10',
' 3 .. 30',
' 5 .. 50']
def test_html_escaping():
    t = table.Table([('<script>alert("gotcha");</script>', 2, 3)])
nbclass = table.conf.default_notebook_table_class
assert t._repr_html_().splitlines() == [
'<i>Table length=3</i>',
'<table id="table{id}" class="{nbclass}">'.format(id=id(t), nbclass=nbclass),
'<thead><tr><th>col0</th></tr></thead>',
'<thead><tr><th>str33</th></tr></thead>',
'<tr><td><script>alert("gotcha");</script></td></tr>',
'<tr><td>2</td></tr>',
'<tr><td>3</td></tr>',
'</table>']
@pytest.mark.usefixtures('table_type')
class TestPprint():
def _setup(self, table_type):
self.tb = table_type(BIG_WIDE_ARR)
self.tb['col0'].format = 'e'
self.tb['col1'].format = '.6f'
self.tb['col0'].unit = 'km**2'
self.tb['col19'].unit = 'kg s m**-2'
self.ts = table_type(SMALL_ARR)
def test_empty_table(self, table_type):
t = table_type()
lines = t.pformat()
assert lines == ['<No columns>']
c = repr(t)
assert c.splitlines() == ['<{0} masked={1} length=0>'.format(table_type.__name__, t.masked),
'<No columns>']
def test_format0(self, table_type):
"""Try getting screen size but fail to defaults because testing doesn't
have access to screen (fcntl.ioctl fails).
"""
self._setup(table_type)
arr = np.arange(4000, dtype=np.float64).reshape(100, 40)
lines = table_type(arr).pformat()
nlines, width = console.terminal_size()
assert len(lines) == nlines
for line in lines[:-1]: # skip last "Length = .. rows" line
assert width - 10 < len(line) <= width
def test_format1(self, table_type):
"""Basic test of formatting, unit header row included"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40)
assert lines == [' col0 col1 ... col19 ',
' km2 ... kg s / m2',
'------------ ----------- ... ---------',
'0.000000e+00 1.000000 ... 19.0',
' ... ... ... ...',
'1.960000e+03 1961.000000 ... 1979.0',
'1.980000e+03 1981.000000 ... 1999.0',
'Length = 100 rows']
def test_format2(self, table_type):
"""Basic test of formatting, unit header row excluded"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=False)
assert lines == [' col0 col1 ... col19 ',
'------------ ----------- ... ------',
'0.000000e+00 1.000000 ... 19.0',
'2.000000e+01 21.000000 ... 39.0',
' ... ... ... ...',
'1.960000e+03 1961.000000 ... 1979.0',
'1.980000e+03 1981.000000 ... 1999.0',
'Length = 100 rows']
def test_format3(self, table_type):
"""Include the unit header row"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=True)
assert lines == [' col0 col1 ... col19 ',
' km2 ... kg s / m2',
'------------ ----------- ... ---------',
'0.000000e+00 1.000000 ... 19.0',
' ... ... ... ...',
'1.960000e+03 1961.000000 ... 1979.0',
'1.980000e+03 1981.000000 ... 1999.0',
'Length = 100 rows']
def test_format4(self, table_type):
"""Do not include the name header row"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_name=False)
assert lines == [' km2 ... kg s / m2',
'------------ ----------- ... ---------',
'0.000000e+00 1.000000 ... 19.0',
'2.000000e+01 21.000000 ... 39.0',
' ... ... ... ...',
'1.960000e+03 1961.000000 ... 1979.0',
'1.980000e+03 1981.000000 ... 1999.0',
'Length = 100 rows']
def test_noclip(self, table_type):
"""Basic table print"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=-1, max_width=-1)
assert lines == ['col0 col1 col2',
'---- ---- ----',
' 0 1 2',
' 3 4 5',
' 6 7 8',
' 9 10 11',
' 12 13 14',
' 15 16 17']
def test_clip1(self, table_type):
"""max lines below hard limit of 8
"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=3, max_width=-1)
assert lines == ['col0 col1 col2',
'---- ---- ----',
' 0 1 2',
' 3 4 5',
' 6 7 8',
' 9 10 11',
' 12 13 14',
' 15 16 17']
def test_clip2(self, table_type):
"""max lines below hard limit of 8 and output longer than 8
"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=3, max_width=-1, show_unit=True, show_dtype=True)
assert lines == [' col0 col1 col2',
' ',
'int64 int64 int64',
'----- ----- -----',
' 0 1 2',
' ... ... ...',
' 15 16 17',
'Length = 6 rows']
def test_clip3(self, table_type):
"""Max lines below hard limit of 8 and max width below hard limit
of 10
"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=3, max_width=1, show_unit=True)
assert lines == ['col0 ...',
' ...',
'---- ...',
' 0 ...',
' ... ...',
' 12 ...',
' 15 ...',
'Length = 6 rows']
def test_clip4(self, table_type):
"""Test a range of max_lines"""
self._setup(table_type)
for max_lines in (0, 1, 4, 5, 6, 7, 8, 100, 101, 102, 103, 104, 130):
lines = self.tb.pformat(max_lines=max_lines, show_unit=False)
assert len(lines) == max(8, min(102, max_lines))
def test_pformat_all(self, table_type):
"""Test that all rows are printed by default"""
self._setup(table_type)
lines = self.tb.pformat_all()
# +3 accounts for the three header lines in this table
assert len(lines) == BIG_WIDE_ARR.shape[0] + 3
def test_pprint_all(self, table_type, capsys):
"""Test that all rows are printed by default"""
self._setup(table_type)
self.tb.pprint_all()
(out, err) = capsys.readouterr()
# +3 accounts for the three header lines in this table
        assert len(out.splitlines()) == BIG_WIDE_ARR.shape[0] + 3
@pytest.mark.usefixtures('table_type')
class TestFormat():
def test_column_format(self, table_type):
t = table_type([[1, 2], [3, 4]], names=('a', 'b'))
# default (format=None)
assert str(t['a']) == ' a \n---\n 1\n 2'
# just a plain format string
t['a'].format = '5.2f'
assert str(t['a']) == ' a \n-----\n 1.00\n 2.00'
# Old-style that is almost new-style
t['a'].format = '{ %4.2f }'
assert str(t['a']) == ' a \n--------\n{ 1.00 }\n{ 2.00 }'
# New-style that is almost old-style
t['a'].format = '%{0:}'
assert str(t['a']) == ' a \n---\n %1\n %2'
# New-style with extra spaces
t['a'].format = ' {0:05d} '
assert str(t['a']) == ' a \n-------\n 00001 \n 00002 '
# New-style has precedence
t['a'].format = '%4.2f {0:}'
assert str(t['a']) == ' a \n-------\n%4.2f 1\n%4.2f 2'
# Invalid format spec
with pytest.raises(ValueError):
t['a'].format = 'fail'
assert t['a'].format == '%4.2f {0:}' # format did not change
def test_column_format_with_threshold(self, table_type):
from astropy import conf
with conf.set_temp('max_lines', 8):
t = table_type([np.arange(20)], names=['a'])
t['a'].format = '%{0:}'
assert str(t['a']).splitlines() == [' a ',
'---',
' %0',
' %1',
'...',
'%18',
'%19',
'Length = 20 rows']
t['a'].format = '{ %4.2f }'
assert str(t['a']).splitlines() == [' a ',
'---------',
' { 0.00 }',
' { 1.00 }',
' ...',
'{ 18.00 }',
'{ 19.00 }',
'Length = 20 rows']
def test_column_format_func(self, table_type):
        # Run most of the functions twice so that:
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))
# mathematical function
t['a'].format = lambda x: str(x * 3.)
assert str(t['a']) == ' a \n---\n3.0\n6.0'
assert str(t['a']) == ' a \n---\n3.0\n6.0'
def test_column_format_callable(self, table_type):
        # Run most of the functions twice so that:
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))
# mathematical function
class format:
def __call__(self, x):
return str(x * 3.)
t['a'].format = format()
assert str(t['a']) == ' a \n---\n3.0\n6.0'
assert str(t['a']) == ' a \n---\n3.0\n6.0'
def test_column_format_func_wrong_number_args(self, table_type):
t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))
# function that expects wrong number of arguments
def func(a, b):
pass
with pytest.raises(ValueError):
t['a'].format = func
def test_column_format_func_multiD(self, table_type):
arr = [np.array([[1, 2],
[10, 20]])]
t = table_type(arr, names=['a'])
# mathematical function
t['a'].format = lambda x: str(x * 3.)
outstr = ' a [2] \n------------\n 3.0 .. 6.0\n30.0 .. 60.0'
assert str(t['a']) == outstr
assert str(t['a']) == outstr
def test_column_format_func_not_str(self, table_type):
t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))
# mathematical function
with pytest.raises(ValueError):
t['a'].format = lambda x: x * 3
def test_column_alignment(self, table_type):
t = table_type([[1], [2], [3], [4]],
names=('long title a', 'long title b',
'long title c', 'long title d'))
t['long title a'].format = '<'
t['long title b'].format = '^'
t['long title c'].format = '>'
t['long title d'].format = '0='
assert str(t['long title a']) == 'long title a\n------------\n1 '
assert str(t['long title b']) == 'long title b\n------------\n 2 '
assert str(t['long title c']) == 'long title c\n------------\n 3'
assert str(t['long title d']) == 'long title d\n------------\n000000000004'
class TestFormatWithMaskedElements():
def test_column_format(self):
t = Table([[1, 2, 3], [3, 4, 5]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False, True]
# default (format=None)
assert str(t['a']) == ' a \n---\n --\n 2\n --'
# just a plain format string
t['a'].format = '5.2f'
assert str(t['a']) == ' a \n-----\n --\n 2.00\n --'
# Old-style that is almost new-style
t['a'].format = '{ %4.2f }'
assert str(t['a']) == ' a \n--------\n --\n{ 2.00 }\n --'
# New-style that is almost old-style
t['a'].format = '%{0:}'
assert str(t['a']) == ' a \n---\n --\n %2\n --'
# New-style with extra spaces
t['a'].format = ' {0:05d} '
assert str(t['a']) == ' a \n-------\n --\n 00002 \n --'
# New-style has precedence
t['a'].format = '%4.2f {0:}'
assert str(t['a']) == ' a \n-------\n --\n%4.2f 2\n --'
def test_column_format_with_threshold(self, table_type):
from astropy import conf
with conf.set_temp('max_lines', 8):
t = table_type([np.arange(20)], names=['a'])
t['a'].format = '%{0:}'
t['a'].mask[0] = True
t['a'].mask[-1] = True
assert str(t['a']).splitlines() == [' a ',
'---',
' --',
' %1',
'...',
'%18',
' --',
'Length = 20 rows']
t['a'].format = '{ %4.2f }'
assert str(t['a']).splitlines() == [' a ',
'---------',
' --',
' { 1.00 }',
' ...',
'{ 18.00 }',
' --',
'Length = 20 rows']
def test_column_format_func(self):
        # Run most of the functions twice so that:
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False, True]
# mathematical function
t['a'].format = lambda x: str(x * 3.)
assert str(t['a']) == ' a \n---\n --\n6.0\n --'
assert str(t['a']) == ' a \n---\n --\n6.0\n --'
def test_column_format_func_with_special_masked(self):
        # Run most of the functions twice so that:
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False, True]
# mathematical function
def format_func(x):
if x is np.ma.masked:
return '!!'
else:
return str(x * 3.)
t['a'].format = format_func
assert str(t['a']) == ' a \n---\n !!\n6.0\n !!'
assert str(t['a']) == ' a \n---\n !!\n6.0\n !!'
def test_column_format_callable(self):
        # Run most of the functions twice so that:
# 1) astropy.table.pprint._format_funcs gets populated
# 2) astropy.table.pprint._format_funcs gets used
t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False, True]
# mathematical function
class format:
def __call__(self, x):
return str(x * 3.)
t['a'].format = format()
assert str(t['a']) == ' a \n---\n --\n6.0\n --'
assert str(t['a']) == ' a \n---\n --\n6.0\n --'
def test_column_format_func_wrong_number_args(self):
t = Table([[1., 2.], [3, 4]], names=('a', 'b'), masked=True)
t['a'].mask = [True, False]
# function that expects wrong number of arguments
def func(a, b):
pass
with pytest.raises(ValueError):
t['a'].format = func
# but if all are masked, it never gets called
t['a'].mask = [True, True]
assert str(t['a']) == ' a \n---\n --\n --'
def test_column_format_func_multiD(self):
arr = [np.array([[1, 2],
[10, 20]])]
t = Table(arr, names=['a'], masked=True)
t['a'].mask[0, 1] = True
t['a'].mask[1, 1] = True
# mathematical function
t['a'].format = lambda x: str(x * 3.)
outstr = ' a [2] \n----------\n 3.0 .. --\n30.0 .. --'
assert str(t['a']) == outstr
assert str(t['a']) == outstr
def test_pprint_npfloat32():
"""
Test for #148, that np.float32 cannot by itself be formatted as float,
but has to be converted to a python float.
"""
dat = np.array([1., 2.], dtype=np.float32)
t = Table([dat], names=['a'])
t['a'].format = '5.2f'
assert str(t['a']) == ' a \n-----\n 1.00\n 2.00'
def test_pprint_py3_bytes():
"""
Test for #1346 and #4944. Make sure a bytestring (dtype=S<N>) in Python 3
is printed correctly (without the "b" prefix like b'string').
"""
val = bytes('val', encoding='utf-8')
blah = u'bläh'.encode('utf-8')
    dat = np.array([val, blah], dtype=[('col', 'S10')])
t = table.Table(dat)
assert t['col'].pformat() == ['col ', '----', ' val', u'bläh']
def test_pprint_nameless_col():
"""Regression test for #2213, making sure a nameless column can be printed
using None as the name.
"""
col = table.Column([1., 2.])
assert str(col).startswith('None')
def test_html():
"""Test HTML printing"""
dat = np.array([1., 2.], dtype=np.float32)
t = Table([dat], names=['a'])
lines = t.pformat(html=True)
assert lines == ['<table id="table{id}">'.format(id=id(t)),
u'<thead><tr><th>a</th></tr></thead>',
u'<tr><td>1.0</td></tr>',
u'<tr><td>2.0</td></tr>',
u'</table>']
lines = t.pformat(html=True, tableclass='table-striped')
assert lines == [
'<table id="table{id}" class="table-striped">'.format(id=id(t)),
u'<thead><tr><th>a</th></tr></thead>',
u'<tr><td>1.0</td></tr>',
u'<tr><td>2.0</td></tr>',
u'</table>']
lines = t.pformat(html=True, tableclass=['table', 'table-striped'])
assert lines == [
'<table id="table{id}" class="table table-striped">'.format(id=id(t)),
u'<thead><tr><th>a</th></tr></thead>',
u'<tr><td>1.0</td></tr>',
u'<tr><td>2.0</td></tr>',
u'</table>']
def test_align():
t = simple_table(2, kinds='iS')
assert t.pformat() == [' a b ',
'--- ---',
' 1 b',
' 2 c']
# Use column format attribute
t['a'].format = '<'
assert t.pformat() == [' a b ',
'--- ---',
'1 b',
'2 c']
# Now override column format attribute with various combinations of align
tpf = [' a b ',
'--- ---',
' 1 b ',
' 2 c ']
for align in ('^', ['^', '^'], ('^', '^')):
assert tpf == t.pformat(align=align)
assert t.pformat(align='<') == [' a b ',
'--- ---',
'1 b ',
'2 c ']
assert t.pformat(align='0=') == [' a b ',
'--- ---',
'001 00b',
'002 00c']
assert t.pformat(align=['<', '^']) == [' a b ',
'--- ---',
'1 b ',
'2 c ']
# Now use fill characters. Stress the system using a fill
# character that is the same as an align character.
t = simple_table(2, kinds='iS')
assert t.pformat(align='^^') == [' a b ',
'--- ---',
'^1^ ^b^',
'^2^ ^c^']
assert t.pformat(align='^>') == [' a b ',
'--- ---',
'^^1 ^^b',
'^^2 ^^c']
assert t.pformat(align='^<') == [' a b ',
'--- ---',
'1^^ b^^',
'2^^ c^^']
# Complicated interaction (same as narrative docs example)
t1 = Table([[1.0, 2.0], [1, 2]], names=['column1', 'column2'])
t1['column1'].format = '#^.2f'
assert t1.pformat() == ['column1 column2',
'------- -------',
'##1.00# 1',
'##2.00# 2']
assert t1.pformat(align='!<') == ['column1 column2',
'------- -------',
'1.00!!! 1!!!!!!',
'2.00!!! 2!!!!!!']
assert t1.pformat(align=[None, '!<']) == ['column1 column2',
'------- -------',
'##1.00# 1!!!!!!',
'##2.00# 2!!!!!!']
# Zero fill
t['a'].format = '+d'
assert t.pformat(align='0=') == [' a b ',
'--- ---',
'+01 00b',
'+02 00c']
with pytest.raises(ValueError):
t.pformat(align=['fail'])
with pytest.raises(TypeError):
t.pformat(align=0)
with pytest.raises(TypeError):
t.pprint(align=0)
# Make sure pprint() does not raise an exception
t.pprint()
with pytest.raises(ValueError):
t.pprint(align=['<', '<', '<'])
with pytest.raises(ValueError):
t.pprint(align='x=')
def test_auto_format_func():
"""Test for #5802 (fix for #5800 where format_func key is not unique)"""
t = Table([[1, 2] * u.m])
t['col0'].format = '%f'
t.pformat() # Force caching of format function
qt = QTable(t)
qt.pformat() # Generates exception prior to #5802
def test_decode_replace():
"""
Test printing a bytestring column with a value that fails
decoding to utf-8 and gets replaced by U+FFFD. See
https://docs.python.org/3/library/codecs.html#codecs.replace_errors
"""
t = Table([[b'Z\xf0']])
assert t.pformat() == [u'col0', u'----', u' Z\ufffd']
|
ddba00e4e9c286ea532e364ee7f6f92e92f5e5c69357f3010a932fa0ca7a6a46 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.table.bst import BST
def get_tree(TreeType):
b = TreeType([], [])
for val in [5, 2, 9, 3, 4, 1, 6, 10, 8, 7]:
b.add(val)
return b
@pytest.fixture
def tree():
return get_tree(BST)
r'''
5
/ \
2 9
/ \ / \
1 3 6 10
\ \
4 8
/
7
'''
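
# A minimal sketch tying the diagram above to the insertion order used by
# get_tree() (the helper name _demo_tree_shape is illustrative only): the
# last value inserted, 7, descends 5 -> 9 -> 6 -> 8 and lands as the left
# child of 8, exactly as drawn.
def _demo_tree_shape():
    b = get_tree(BST)
    assert b.root.right.left.right.left.data == [7]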
@pytest.fixture
def bst(tree):
return tree
def test_bst_add(bst):
root = bst.root
assert root.data == [5]
assert root.left.data == [2]
assert root.right.data == [9]
assert root.left.left.data == [1]
assert root.left.right.data == [3]
assert root.right.left.data == [6]
assert root.right.right.data == [10]
assert root.left.right.right.data == [4]
assert root.right.left.right.data == [8]
assert root.right.left.right.left.data == [7]
def test_bst_dimensions(bst):
assert bst.size == 10
assert bst.height == 4
def test_bst_find(tree):
bst = tree
for i in range(1, 11):
node = bst.find(i)
assert node == [i]
assert bst.find(0) == []
assert bst.find(11) == []
assert bst.find('1') == []
def test_bst_traverse(bst):
preord = [5, 2, 1, 3, 4, 9, 6, 8, 7, 10]
inord = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
postord = [1, 4, 3, 2, 7, 8, 6, 10, 9, 5]
traversals = {}
for order in ('preorder', 'inorder', 'postorder'):
traversals[order] = [x.key for x in bst.traverse(order)]
assert traversals['preorder'] == preord
assert traversals['inorder'] == inord
assert traversals['postorder'] == postord
def test_bst_remove(bst):
order = (6, 9, 1, 3, 7, 2, 10, 5, 4, 8)
vals = set(range(1, 11))
for i, val in enumerate(order):
assert bst.remove(val) is True
assert bst.is_valid()
assert set([x.key for x in bst.traverse('inorder')]) == \
vals.difference(order[:i+1])
assert bst.size == 10 - i - 1
assert bst.remove(-val) is False
def test_bst_duplicate(bst):
bst.add(10, 11)
assert bst.find(10) == [10, 11]
assert bst.remove(10, data=10) is True
assert bst.find(10) == [11]
with pytest.raises(ValueError):
bst.remove(10, data=30) # invalid data
assert bst.remove(10) is True
assert bst.remove(10) is False
def test_bst_range(tree):
bst = tree
lst = bst.range_nodes(4, 8)
assert sorted(x.key for x in lst) == [4, 5, 6, 7, 8]
lst = bst.range_nodes(10, 11)
assert [x.key for x in lst] == [10]
lst = bst.range_nodes(11, 20)
assert len(lst) == 0
|
0ff41ffa80e315f0040eb6ee474e1303866f4626b0f69aa14f901c1cf558efe0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal_nulp
from astropy.convolution.convolve import convolve_fft, convolve
from astropy.utils.exceptions import AstropyUserWarning
VALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))
BOUNDARY_OPTIONS = [None, 'fill', 'wrap']
NANTREATMENT_OPTIONS = ('interpolate', 'fill')
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]
"""
What does convolution mean? We use the 'same size' assumption here (i.e.,
you expect an array of the exact same size as the one you put in)
Convolving any array with a kernel that is [1] should result in the same array returned
Working example array: [1, 2, 3, 4, 5]
Convolved with [1] = [1, 2, 3, 4, 5]
Convolved with [1, 1] = [1, 3, 5, 7, 9] THIS IS NOT CONSISTENT!
Convolved with [1, 0] = [1, 2, 3, 4, 5]
Convolved with [0, 1] = [0, 1, 2, 3, 4]
"""
# NOTE: use_numpy_fft is redundant if you don't have FFTW installed
option_names = ('boundary', 'nan_treatment', 'normalize_kernel')
options = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
))
option_names_preserve_nan = ('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan')
options_preserve_nan = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False)))
def assert_floatclose(x, y):
"""Assert arrays are close to within expected floating point rounding.
Check that the result is correct at the precision expected for 64 bit
numbers, taking account that the tolerance has to reflect that all powers
in the FFTs enter our values.
"""
# The number used is set by the fact that the Windows FFT sometimes
# returns an answer that is EXACTLY 10*np.spacing.
assert_allclose(x, y, atol=10*np.spacing(x.max()), rtol=0.)
class TestConvolve1D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1_none(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a unit kernel with a single element returns the same array
'''
x = np.array([1., 2., 3.], dtype='float64')
y = np.array([1.], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
'''
x = np.array([1., 2., 3.], dtype='float64')
y = np.array([0., 1., 0.], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='float64')
y = np.array([1., 1., 1.], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
answer_key = (boundary, nan_treatment, normalize_kernel)
answer_dict = {
'sum_fill_zeros': np.array([1., 4., 3.], dtype='float64'),
'average_fill_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
}
result_dict = {
# boundary, nan_treatment, normalize_kernel
('fill', 'interpolate', True): answer_dict['average_fill_zeros'],
('wrap', 'interpolate', True): answer_dict['average_wrap'],
('fill', 'interpolate', False): answer_dict['sum_fill_zeros'],
('wrap', 'interpolate', False): answer_dict['sum_wrap'],
}
for k in list(result_dict.keys()):
result_dict[(k[0], 'fill', k[2])] = result_dict[k]
for k in list(result_dict.keys()):
if k[0] == 'fill':
result_dict[(None, k[1], k[2])] = result_dict[k]
assert_floatclose(z, result_dict[answer_key])
@pytest.mark.parametrize(option_names, options)
def test_halfity_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a uniform, non-unity kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='float64')
y = np.array([0.5, 0.5, 0.5], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
answer_dict = {
'sum': np.array([0.5, 2.0, 1.5], dtype='float64'),
'sum_zeros': np.array([0.5, 2., 1.5], dtype='float64'),
'sum_nozeros': np.array([0.5, 2., 1.5], dtype='float64'),
'average': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'sum_wrap': np.array([2., 2., 2.], dtype='float64'),
'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
'average_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'average_nozeros': np.array([0.5, 4 / 3., 1.5], dtype='float64'),
}
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
else:
# average = average_zeros; sum = sum_zeros
answer_key += '_zeros'
assert_floatclose(z, answer_dict[answer_key])
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = np.array([1., np.nan, 3.], dtype='float64')
y = np.array([0., 1., 0.], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, [1., 0., 3.])
inputs = (np.array([1., np.nan, 3.], dtype='float64'),
np.array([1., np.inf, 3.], dtype='float64'))
outputs = (np.array([1., 0., 3.], dtype='float64'),
np.array([1., 0., 3.], dtype='float64'))
options_unity1withnan = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False),
inputs, outputs))
@pytest.mark.parametrize(option_names_preserve_nan + ('inval', 'outval'),
options_unity1withnan)
def test_unity_1_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan, inval, outval):
'''
Test that a single-element unit kernel returns the same array. This
version includes a NaN (or infinite) value in the original array.
'''
x = inval
y = np.array([1.], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, outval)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that the different modes produce the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
'''
x = np.array([1., np.nan, 3.], dtype='float64')
y = np.array([1., 1., 1.], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
answer_dict = {
'sum': np.array([1., 4., 3.], dtype='float64'),
'sum_nozeros': np.array([1., 4., 3.], dtype='float64'),
'sum_zeros': np.array([1., 4., 3.], dtype='float64'),
'sum_nozeros_interpnan': np.array([1., 4., 3.], dtype='float64'),
'average': np.array([1., 2., 3.], dtype='float64'),
'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
'average_wrap': np.array([4/3., 4/3., 4/3.], dtype='float64'),
'average_wrap_interpnan': np.array([2, 2, 2], dtype='float64'),
'average_nozeros': np.array([1/2., 4/3., 3/2.], dtype='float64'),
'average_nozeros_interpnan': np.array([1., 2., 3.], dtype='float64'),
'average_zeros': np.array([1 / 3., 4 / 3., 3 / 3.], dtype='float64'),
'average_zeros_interpnan': np.array([1 / 2., 4 / 2., 3 / 2.], dtype='float64'),
}
for key in list(answer_dict.keys()):
if 'sum' in key:
answer_dict[key+"_interpnan"] = answer_dict[key] * 3./2.
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
else:
# average = average_zeros; sum = sum_zeros
answer_key += '_zeros'
if nan_treatment == 'interpolate':
answer_key += '_interpnan'
posns = np.where(np.isfinite(z))
assert_floatclose(z[posns], answer_dict[answer_key][posns])
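# Sketch (illustrative helper, not in the original file) of why the 'sum'
# answers above are rescaled by 3/2 under nan_treatment='interpolate': one of
# the three kernel cells always lands on the NaN, so each output is divided by
# the remaining valid weight (2/3 of the total), i.e. multiplied by 3/2.
def _demo_interpnan_rescaling():
    import numpy as np
    from astropy.convolution import convolve_fft
    x = np.array([1., np.nan, 3.])
    y = np.array([1., 1., 1.])
    z = convolve_fft(x, y, boundary='fill', nan_treatment='interpolate',
                     normalize_kernel=False)
    np.testing.assert_allclose(z, np.array([1., 4., 3.]) * 3. / 2., atol=1e-10)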
def test_nan_fill(self):
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill',
fill_value=np.nan)
assert_floatclose(result, [1, 2, 3])
def test_masked_array(self):
"""
Check whether convolve_fft works with masked arrays.
"""
# Test masked array
array = np.array([1., 2., 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill',
fill_value=0.)
assert_floatclose(result, [1./2, 2, 3./2])
# Now test against convolve()
convolve_result = convolve(masked_array, kernel, boundary='fill',
fill_value=0.)
assert_floatclose(convolve_result, result)
# Test masked kernel
array = np.array([1., 2., 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_kernel = np.ma.masked_array(kernel, mask=[0, 1, 0])
result = convolve_fft(array, masked_kernel, boundary='fill',
fill_value=0.)
assert_floatclose(result, [1, 2, 1])
# Now test against convolve()
convolve_result = convolve(array, masked_kernel, boundary='fill',
fill_value=0.)
assert_floatclose(convolve_result, result)
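# Sketch (illustrative, not part of the original suite): a masked element is
# treated like a NaN under the default nan_treatment='interpolate', so the
# masked and NaN inputs below give the same answer.
def _demo_mask_equals_nan():
    import numpy as np
    from astropy.convolution import convolve_fft
    kernel = np.array([1., 1., 1.])
    masked = np.ma.masked_array([1., 2., 3.], mask=[0, 1, 0])
    with_nan = np.array([1., np.nan, 3.])
    z_mask = convolve_fft(masked, kernel, boundary='fill', fill_value=0.)
    z_nan = convolve_fft(with_nan, kernel, boundary='fill', fill_value=0.)
    np.testing.assert_allclose(z_mask, z_nan, atol=1e-10)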
def test_normalize_function(self):
"""
Check if convolve_fft works when passing a normalize function.
"""
array = [1, 2, 3]
kernel = [3, 3, 3]
result = convolve_fft(array, kernel, normalize_kernel=np.max)
assert_floatclose(result, [3, 6, 5])
@pytest.mark.parametrize(option_names, options)
def test_normalization_is_respected(self, boundary,
nan_treatment,
normalize_kernel):
"""
Check that if normalize_kernel is False then the normalization
tolerance is respected.
"""
array = np.array([1, 2, 3])
# A simple identity kernel to which a non-zero normalization is added.
base_kernel = np.array([1.0])
# Use the same normalization error tolerance in all cases.
normalization_rtol = 1e-4
# Add the error below to the kernel.
norm_error = [normalization_rtol / 10, normalization_rtol * 10]
for err in norm_error:
kernel = base_kernel + err
result = convolve_fft(array, kernel,
normalize_kernel=normalize_kernel,
nan_treatment=nan_treatment,
normalization_zero_tol=normalization_rtol)
if normalize_kernel:
# Kernel has been normalized to 1.
assert_floatclose(result, array)
else:
# Kernel should not have been normalized...
assert_floatclose(result, array * kernel)
class TestConvolve2D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1x1_none(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a 1x1 unit kernel returns the same array
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[1.]], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3x3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3x3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes produce the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[0., 0., 3.],
[1., 0., 0.],
[0., 2., 0.]], dtype='float64')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel)
w = np.array([[4., 6., 4.],
[6., 9., 6.],
[4., 6., 4.]], dtype='float64')
answer_dict = {
'sum': np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='float64'),
'sum_wrap': np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='float64'),
}
answer_dict['average'] = answer_dict['sum'] / w
answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9.
answer_dict['average_withzeros'] = answer_dict['sum'] / 9.
answer_dict['sum_withzeros'] = answer_dict['sum']
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
elif nan_treatment == 'fill':
answer_key += '_withzeros'
a = answer_dict[answer_key]
assert_floatclose(z, a)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3x3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[1., 2., 3.],
[4., np.nan, 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='float64')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1, 1])
z = np.nan_to_num(z)
x = np.nan_to_num(x)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3x3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that the different modes produce the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 3.],
[1., np.nan, 0.],
[0., 2., 0.]], dtype='float64')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='float64')
# commented out: allow unnormalized nan-ignoring convolution
# # kernel is not normalized, so this situation -> exception
# if nan_treatment and not normalize_kernel:
# with pytest.raises(ValueError):
# z = convolve_fft(x, y, boundary=boundary,
# nan_treatment=nan_treatment,
# normalize_kernel=normalize_kernel,
# ignore_edge_zeros=ignore_edge_zeros,
# )
# return
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
else:
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1, 1])
# weights
w_n = np.array([[3., 5., 3.],
[5., 8., 5.],
[3., 5., 3.]], dtype='float64')
w_z = np.array([[4., 6., 4.],
[6., 9., 6.],
[4., 6., 4.]], dtype='float64')
answer_dict = {
'sum': np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='float64'),
'sum_wrap': np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='float64'),
}
answer_dict['average'] = answer_dict['sum'] / w_z
answer_dict['average_interpnan'] = answer_dict['sum'] / w_n
answer_dict['average_wrap_interpnan'] = answer_dict['sum_wrap'] / 8.
answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9.
answer_dict['average_withzeros'] = answer_dict['sum'] / 9.
answer_dict['average_withzeros_interpnan'] = answer_dict['sum'] / 8.
answer_dict['sum_withzeros'] = answer_dict['sum']
answer_dict['sum_interpnan'] = answer_dict['sum'] * 9/8.
answer_dict['sum_withzeros_interpnan'] = answer_dict['sum']
answer_dict['sum_wrap_interpnan'] = answer_dict['sum_wrap'] * 9/8.
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
elif nan_treatment == 'fill':
answer_key += '_withzeros'
if nan_treatment == 'interpolate':
answer_key += '_interpnan'
a = answer_dict[answer_key]
# Skip the NaN at [1, 1] when preserve_nan=True
posns = np.where(np.isfinite(z))
# for reasons unknown, the Windows FFT returns an answer for the [0, 0]
# component that is EXACTLY 10*np.spacing
assert_floatclose(z[posns], a[posns])
def test_big_fail(self):
""" Test that convolve_fft raises an exception if a too-large array is passed in """
with pytest.raises((ValueError, MemoryError)):
# while a good idea, this approach did not work; it actually writes to disk
# arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=complex)
# this just allocates the memory but never touches it; it's better:
arr = np.empty([512, 512, 512], dtype=complex)
# note 512**3 * 16 bytes = 2.0 GB
convolve_fft(arr, arr)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_normalized_kernel(self, boundary):
x = np.array([[0., 0., 4.],
[1., 2., 0.],
[0., 3., 0.]], dtype='float')
y = np.array([[1., -1., 1.],
[-1., 0., -1.],
[1., -1., 1.]], dtype='float')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
else:
z = convolve_fft(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary in (None, 'fill'):
assert_floatclose(z, np.array([[1., -5., 2.],
[1., 0., -3.],
[-2., -1., -1.]], dtype='float'))
elif boundary == 'wrap':
assert_floatclose(z, np.array([[0., -8., 6.],
[5., 0., -4.],
[2., 3., -4.]], dtype='float'))
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
'''
Make sure that asymmetric convolution
functions go the right direction
'''
x = np.array([3., 0., 1.], dtype='>f8')
y = np.array([1, 2, 3], dtype='>f8')
if boundary is None:
with pytest.warns(AstropyUserWarning, match="The convolve_fft "
"version of boundary=None is equivalent to the "
"convolve boundary='fill'"):
z = convolve_fft(x, y, boundary=boundary, normalize_kernel=False)
else:
z = convolve_fft(x, y, boundary=boundary, normalize_kernel=False)
if boundary in (None, 'fill'):
assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified(boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve_fft doesn't modify the input data
"""
array = [1., 4., 5., 6., 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
assert np.all(np.array(array, dtype=dtype) == x)
assert np.all(np.array(kernel, dtype=dtype) == y)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified_with_nan(boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve_fft doesn't modify the input data
"""
array = [1., 4., 5., np.nan, 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
# make copies for post call comparison
x_copy = x.copy()
y_copy = y.copy()
z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
# ( NaN == NaN ) = False
# Only compare non NaN values for canonical equivalence
# and then check NaN explicitly with np.isnan()
array_is_nan = np.isnan(array)
kernel_is_nan = np.isnan(kernel)
array_not_nan = ~array_is_nan
kernel_not_nan = ~kernel_is_nan
assert np.all(x_copy[array_not_nan] == x[array_not_nan])
assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
assert np.all(np.isnan(x[array_is_nan]))
assert np.all(np.isnan(y[kernel_is_nan]))
|
df8fd539620b378c48433494a89b47576f33d871906c8689570bf777d93f0e44 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import convolution as conv
from astropy.tests.helper import pickle_protocol, check_pickling_recovery # noqa
@pytest.mark.parametrize(("name", "args", "kwargs", "xfail"),
[(conv.CustomKernel, [],
{'array': np.random.rand(15)},
False),
(conv.Gaussian1DKernel, [1.0],
{'x_size': 5},
True),
(conv.Gaussian2DKernel, [1.0],
{'x_size': 5, 'y_size': 5},
True),
])
def test_simple_object(pickle_protocol, name, args, kwargs, xfail):
# Tests easily instantiated objects
if xfail:
pytest.xfail()
original = name(*args, **kwargs)
check_pickling_recovery(original, pickle_protocol)
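# A minimal sketch (assumption: this is roughly what check_pickling_recovery
# verifies for the non-xfail case) showing that a CustomKernel survives a
# pickle round trip with its array intact.
def _demo_kernel_pickle_roundtrip():
    import pickle
    import numpy as np
    from astropy.convolution import CustomKernel
    original = CustomKernel(np.random.rand(15))
    restored = pickle.loads(pickle.dumps(original))
    np.testing.assert_allclose(original.array, restored.array)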
|
ab33071e8b73df005ab5bba49014ddb73f27f4cf484d1f93ea9c0b7a9a34e3b2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import Gaussian2DKernel
from astropy.nddata import NDData
def test_basic_nddata():
arr = np.zeros((11, 11))
arr[5, 5] = 1
ndd = NDData(arr)
test_kernel = Gaussian2DKernel(1)
result = convolve(ndd, test_kernel)
x, y = np.mgrid[:11, :11]
expected = result[5, 5] * np.exp(-0.5 * ((x - 5)**2 + (y - 5)**2))
np.testing.assert_allclose(result, expected, atol=1e-6)
resultf = convolve_fft(ndd, test_kernel)
np.testing.assert_allclose(resultf, expected, atol=1e-6)
@pytest.mark.parametrize('convfunc',
[lambda *args: convolve(*args, nan_treatment='interpolate', normalize_kernel=True),
lambda *args: convolve_fft(*args, nan_treatment='interpolate', normalize_kernel=True)])
def test_masked_nddata(convfunc):
arr = np.zeros((11, 11))
arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2
arr[5, 5] = 1.5
ndd_base = NDData(arr)
mask = arr < 0 # this is all False
mask[5, 5] = True
ndd_mask = NDData(arr, mask=mask)
arrnan = arr.copy()
arrnan[5, 5] = np.nan
ndd_nan = NDData(arrnan)
test_kernel = Gaussian2DKernel(1)
result_base = convfunc(ndd_base, test_kernel)
result_nan = convfunc(ndd_nan, test_kernel)
result_mask = convfunc(ndd_mask, test_kernel)
assert np.allclose(result_nan, result_mask)
assert not np.allclose(result_base, result_mask)
assert not np.allclose(result_base, result_nan)
# check to make sure the mask run doesn't talk back to the initial array
assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data))
|
3d4d7b36f956efb25047bfdbc6f785b388ceb4647febdbd7d17ad73ff108f550 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import Gaussian2DKernel, Box2DKernel, Tophat2DKernel
from astropy.convolution.kernels import Moffat2DKernel
SHAPES_ODD = [[15, 15], [31, 31]]
SHAPES_EVEN = [[8, 8], [16, 16], [32, 32]] # FIXME: not used ?!
NOSHAPE = [[None, None]]
WIDTHS = [2, 3, 4, 5]
KERNELS = []
for shape in SHAPES_ODD + NOSHAPE:
for width in WIDTHS:
KERNELS.append(Gaussian2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Box2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Tophat2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Moffat2DKernel(width, 2,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
class Test2DConvolutions:
@pytest.mark.parametrize('kernel', KERNELS)
def test_centered_makekernel(self, kernel):
"""
Test smoothing of an image with a single positive pixel
"""
shape = kernel.array.shape
x = np.zeros(shape)
xslice = tuple([slice(sh // 2, sh // 2 + 1) for sh in shape])
x[xslice] = 1.0
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize('kernel', KERNELS)
def test_random_makekernel(self, kernel):
"""
Test smoothing of an image made of random noise
"""
shape = kernel.array.shape
x = np.random.randn(*shape)
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
# not clear why, but these differ by a couple ulps...
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, WIDTHS)))
def test_uniform_smallkernel(self, shape, width):
"""
Test smoothing of an image with a single positive pixel
Uses a simple, small kernel
"""
if width % 2 == 0:
# convolve only accepts odd-shaped kernels, so skip even widths
return
kernel = np.ones([width, width])
x = np.zeros(shape)
xslice = tuple([slice(sh // 2, sh // 2 + 1) for sh in shape])
x[xslice] = 1.0
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, [1, 3, 5])))
def test_smallkernel_Box2DKernel(self, shape, width):
"""
Test smoothing of an image with a single positive pixel
Compares a small uniform kernel to the Box2DKernel
"""
kernel1 = np.ones([width, width]) / float(width) ** 2
kernel2 = Box2DKernel(width, mode='oversample', factor=10)
x = np.zeros(shape)
xslice = tuple([slice(sh // 2, sh // 2 + 1) for sh in shape])
x[xslice] = 1.0
c2 = convolve_fft(x, kernel2, boundary='fill')
c1 = convolve_fft(x, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
c2 = convolve(x, kernel2, boundary='fill')
c1 = convolve(x, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
|
618d82552631e6fa7c9f57e88c223e7b09e63f6c3af60900272033eac5405cc2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import (
Gaussian1DKernel, Gaussian2DKernel, Box1DKernel, Box2DKernel,
Trapezoid1DKernel, TrapezoidDisk2DKernel, MexicanHat1DKernel,
Tophat2DKernel, MexicanHat2DKernel, AiryDisk2DKernel, Ring2DKernel,
CustomKernel, Model1DKernel, Model2DKernel, Kernel1D, Kernel2D)
from astropy.convolution.utils import KernelSizeError
from astropy.modeling.models import Box2D, Gaussian1D, Gaussian2D
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
try:
from scipy.ndimage import filters
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
WIDTHS_ODD = [3, 5, 7, 9]
WIDTHS_EVEN = [2, 4, 8, 16]
MODES = ['center', 'linear_interp', 'oversample', 'integrate']
KERNEL_TYPES = [Gaussian1DKernel, Gaussian2DKernel,
Box1DKernel, Box2DKernel,
Trapezoid1DKernel, TrapezoidDisk2DKernel,
MexicanHat1DKernel, Tophat2DKernel, AiryDisk2DKernel,
Ring2DKernel]
NUMS = [1, 1., np.float32(1.), np.float64(1.)]
# Test data
delta_pulse_1D = np.zeros(81)
delta_pulse_1D[40] = 1
delta_pulse_2D = np.zeros((81, 81))
delta_pulse_2D[40, 40] = 1
random_data_1D = np.random.rand(61)
random_data_2D = np.random.rand(61, 61)
class TestKernels:
"""
Test class for the built-in convolution kernels.
"""
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_scipy_filter_gaussian(self, width):
"""
Test GaussianKernel against SciPy ndimage gaussian filter.
"""
gauss_kernel_1D = Gaussian1DKernel(width)
gauss_kernel_1D.normalize()
gauss_kernel_2D = Gaussian2DKernel(width)
gauss_kernel_2D.normalize()
astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary='fill')
astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary='fill')
scipy_1D = filters.gaussian_filter(delta_pulse_1D, width)
scipy_2D = filters.gaussian_filter(delta_pulse_2D, width)
assert_almost_equal(astropy_1D, scipy_1D, decimal=12)
assert_almost_equal(astropy_2D, scipy_2D, decimal=12)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_scipy_filter_gaussian_laplace(self, width):
"""
Test MexicanHat kernels against SciPy ndimage gaussian laplace filters.
"""
mexican_kernel_1D = MexicanHat1DKernel(width)
mexican_kernel_2D = MexicanHat2DKernel(width)
astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill', normalize_kernel=False)
astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill', normalize_kernel=False)
with pytest.raises(Exception) as exc:
astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill', normalize_kernel=True)
assert 'sum is close to zero' in exc.value.args[0]
with pytest.raises(Exception) as exc:
astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill', normalize_kernel=True)
assert 'sum is close to zero' in exc.value.args[0]
# The Laplace of Gaussian filter is an inverted Mexican Hat
# filter.
scipy_1D = -filters.gaussian_laplace(delta_pulse_1D, width)
scipy_2D = -filters.gaussian_laplace(delta_pulse_2D, width)
# There is a slight deviation in the normalization. They differ by a
# factor of ~1.0000284132604045. The reason is not known.
assert_almost_equal(astropy_1D, scipy_1D, decimal=5)
assert_almost_equal(astropy_2D, scipy_2D, decimal=5)
@pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))
def test_delta_data(self, kernel_type, width):
"""
Test smoothing of an image with a single positive pixel
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if kernel_type != Ring2DKernel:
kernel = kernel_type(width)
else:
kernel = kernel_type(width, width * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))
def test_random_data(self, kernel_type, width):
"""
Test smoothing of an image made of random noise
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if kernel_type != Ring2DKernel:
kernel = kernel_type(width)
else:
kernel = kernel_type(width, width * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(random_data_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(random_data_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(random_data_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(random_data_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_uniform_smallkernel(self, width):
"""
Test smoothing of an image with a single positive pixel
Instead of using kernel class, uses a simple, small kernel
"""
kernel = np.ones([width, width])
c2 = convolve_fft(delta_pulse_2D, kernel, boundary='fill')
c1 = convolve(delta_pulse_2D, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_smallkernel_vs_Box2DKernel(self, width):
"""
Test smoothing of an image with a single positive pixel
"""
kernel1 = np.ones([width, width]) / width ** 2
kernel2 = Box2DKernel(width)
c2 = convolve_fft(delta_pulse_2D, kernel2, boundary='fill')
c1 = convolve_fft(delta_pulse_2D, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_convolve_1D_kernels(self):
"""
Check if convolving two kernels with each other works correctly.
"""
gauss_1 = Gaussian1DKernel(3)
gauss_2 = Gaussian1DKernel(4)
test_gauss_3 = Gaussian1DKernel(5)
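# 3, 4, 5 form a Pythagorean triple on purpose: convolving two Gaussians
# yields a Gaussian whose variance is the sum of the variances, so stddevs
# 3 and 4 combine to sqrt(3**2 + 4**2) = 5.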
with pytest.warns(AstropyUserWarning, match='Both array and kernel '
'are Kernel instances'):
gauss_3 = convolve(gauss_1, gauss_2)
assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)
def test_convolve_2D_kernels(self):
"""
Check if convolving two kernels with each other works correctly.
"""
gauss_1 = Gaussian2DKernel(3)
gauss_2 = Gaussian2DKernel(4)
test_gauss_3 = Gaussian2DKernel(5)
with pytest.warns(AstropyUserWarning, match='Both array and kernel '
'are Kernel instances'):
gauss_3 = convolve(gauss_1, gauss_2)
assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)
@pytest.mark.parametrize(('number'), NUMS)
def test_multiply_scalar(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = number * gauss
assert_almost_equal(gauss_new.array, gauss.array * number, decimal=12)
@pytest.mark.parametrize(('number'), NUMS)
def test_multiply_scalar_type(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = number * gauss
assert type(gauss_new) is Gaussian1DKernel
@pytest.mark.parametrize(('number'), NUMS)
def test_rmultiply_scalar_type(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = gauss * number
assert type(gauss_new) is Gaussian1DKernel
def test_multiply_kernel1d(self):
"""Test that multiplying two 1D kernels raises an exception."""
gauss = Gaussian1DKernel(3)
with pytest.raises(Exception):
gauss * gauss
def test_multiply_kernel2d(self):
"""Test that multiplying two 2D kernels raises an exception."""
gauss = Gaussian2DKernel(3)
with pytest.raises(Exception):
gauss * gauss
def test_multiply_kernel1d_kernel2d(self):
"""
Test that multiplying a 1D kernel with a 2D kernel raises an
exception.
"""
with pytest.raises(Exception):
Gaussian1DKernel(3) * Gaussian2DKernel(3)
def test_add_kernel_scalar(self):
"""Test that adding a scalar to a kernel raises an exception."""
with pytest.raises(Exception):
Gaussian1DKernel(3) + 1
def test_model_1D_kernel(self):
"""
Check Model1DKernel against Gaussian1DKernel
"""
stddev = 5.
gauss = Gaussian1D(1. / np.sqrt(2 * np.pi * stddev**2), 0, stddev)
model_gauss_kernel = Model1DKernel(gauss, x_size=21)
gauss_kernel = Gaussian1DKernel(stddev, x_size=21)
assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,
decimal=12)
def test_model_2D_kernel(self):
"""
Check Model2DKernel against Gaussian2DKernel
"""
stddev = 5.
gauss = Gaussian2D(1. / (2 * np.pi * stddev**2), 0, 0, stddev, stddev)
model_gauss_kernel = Model2DKernel(gauss, x_size=21)
gauss_kernel = Gaussian2DKernel(stddev, x_size=21)
assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,
decimal=12)
def test_custom_1D_kernel(self):
"""
Check CustomKernel against Box1DKernel.
"""
# Define a one-dimensional array:
array = np.ones(5)
custom = CustomKernel(array)
custom.normalize()
box = Box1DKernel(5)
c2 = convolve(delta_pulse_1D, custom, boundary='fill')
c1 = convolve(delta_pulse_1D, box, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_custom_2D_kernel(self):
"""
Check CustomKernel against Box2DKernel.
"""
# Define a two-dimensional array:
array = np.ones((5, 5))
custom = CustomKernel(array)
custom.normalize()
box = Box2DKernel(5)
c2 = convolve(delta_pulse_2D, custom, boundary='fill')
c1 = convolve(delta_pulse_2D, box, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_custom_1D_kernel_list(self):
"""
Check if CustomKernel works with lists.
"""
custom = CustomKernel([1, 1, 1, 1, 1])
assert custom.is_bool is True
def test_custom_2D_kernel_list(self):
"""
Check if CustomKernel works with lists.
"""
custom = CustomKernel([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
assert custom.is_bool is True
def test_custom_1D_kernel_zerosum(self):
"""
Check if CustomKernel works when the input array/list
sums to zero.
"""
array = [-2, -1, 0, 1, 2]
custom = CustomKernel(array)
with pytest.warns(AstropyUserWarning, match='kernel cannot be '
'normalized because it sums to zero'):
custom.normalize()
assert custom.truncation == 0.
assert custom._kernel_sum == 0.
def test_custom_2D_kernel_zerosum(self):
"""
Check if CustomKernel works when the input array/list
sums to zero.
"""
array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]]
custom = CustomKernel(array)
with pytest.warns(AstropyUserWarning, match='kernel cannot be '
'normalized because it sums to zero'):
custom.normalize()
assert custom.truncation == 0.
assert custom._kernel_sum == 0.
def test_custom_kernel_odd_error(self):
"""
Check that CustomKernel raises an error if the array size is even.
"""
with pytest.raises(KernelSizeError):
CustomKernel([1, 1, 1, 1])
def test_add_1D_kernels(self):
"""
Check that adding two 1D kernels works.
"""
box_1 = Box1DKernel(5)
box_2 = Box1DKernel(3)
box_3 = Box1DKernel(1)
box_sum_1 = box_1 + box_2 + box_3
box_sum_2 = box_2 + box_3 + box_1
box_sum_3 = box_3 + box_1 + box_2
ref = [1/5., 1/5. + 1/3., 1 + 1/3. + 1/5., 1/5. + 1/3., 1/5.]
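# Kernel addition zero-pads the smaller operand to the common (largest) size
# before summing: ref is box_1 + [0, 1/3, 1/3, 1/3, 0] + [0, 0, 1, 0, 0].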
assert_almost_equal(box_sum_1.array, ref, decimal=12)
assert_almost_equal(box_sum_2.array, ref, decimal=12)
assert_almost_equal(box_sum_3.array, ref, decimal=12)
# Assert that the kernels haven't changed
assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12)
assert_almost_equal(box_2.array, [1/3., 1/3., 1/3.], decimal=12)
assert_almost_equal(box_3.array, [1], decimal=12)
def test_add_2D_kernels(self):
"""
Check that adding two 2D kernels works.
"""
box_1 = Box2DKernel(3)
box_2 = Box2DKernel(1)
box_sum_1 = box_1 + box_2
box_sum_2 = box_2 + box_1
ref = [[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 + 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.]]
ref_1 = [[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.]]
assert_almost_equal(box_2.array, [[1]], decimal=12)
assert_almost_equal(box_1.array, ref_1, decimal=12)
assert_almost_equal(box_sum_1.array, ref, decimal=12)
assert_almost_equal(box_sum_2.array, ref, decimal=12)
def test_Gaussian1DKernel_even_size(self):
"""
Check if even size for GaussianKernel works.
"""
gauss = Gaussian1DKernel(3, x_size=10)
assert gauss.array.size == 10
def test_Gaussian2DKernel_even_size(self):
"""
Check if even size for GaussianKernel works.
"""
gauss = Gaussian2DKernel(3, x_size=10, y_size=10)
assert gauss.array.shape == (10, 10)
# https://github.com/astropy/astropy/issues/3605
def test_Gaussian2DKernel_rotated(self):
with pytest.warns(AstropyDeprecationWarning) as w:
Gaussian2DKernel(stddev=10)
assert len(w) == 1
gauss = Gaussian2DKernel(
x_stddev=3, y_stddev=1.5, theta=0.7853981633974483,
x_size=5, y_size=5) # rotated 45 deg ccw
ans = [[0.02267712, 0.02464785, 0.02029238, 0.01265463, 0.00597762],
[0.02464785, 0.03164847, 0.03078144, 0.02267712, 0.01265463],
[0.02029238, 0.03078144, 0.03536777, 0.03078144, 0.02029238],
[0.01265463, 0.02267712, 0.03078144, 0.03164847, 0.02464785],
[0.00597762, 0.01265463, 0.02029238, 0.02464785, 0.02267712]]
assert_allclose(gauss, ans, rtol=0.001) # Rough comparison at 0.1 %
def test_normalize_peak(self):
"""
Check if normalize works with peak mode.
"""
custom = CustomKernel([1, 2, 3, 2, 1])
custom.normalize(mode='peak')
assert custom.array.max() == 1
def test_check_kernel_attributes(self):
"""
Check if kernel attributes are correct.
"""
box = Box2DKernel(5)
# Check truncation
assert box.truncation == 0
# Check model
assert isinstance(box.model, Box2D)
# Check center
assert box.center == [2, 2]
# Check normalization
box.normalize()
assert_almost_equal(box._kernel_sum, 1., decimal=12)
# Check separability
assert box.separable
@pytest.mark.parametrize(('kernel_type', 'mode'), list(itertools.product(KERNEL_TYPES, MODES)))
def test_discretize_modes(self, kernel_type, mode):
"""
Check if the different modes result in kernels that work with convolve.
Use only small kernel width, to make the test pass quickly.
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if kernel_type != Ring2DKernel:
kernel = kernel_type(3)
else:
kernel = kernel_type(3, 3 * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_EVEN)
def test_box_kernels_even_size(self, width):
"""
Check if BoxKernel work properly with even sizes.
"""
kernel_1D = Box1DKernel(width)
assert kernel_1D.shape[0] % 2 != 0
assert kernel_1D.array.sum() == 1.
kernel_2D = Box2DKernel(width)
assert np.all([_ % 2 != 0 for _ in kernel_2D.shape])
assert kernel_2D.array.sum() == 1.
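# Sketch (illustrative, not from the original suite): an even requested width
# still yields an odd-shaped, unit-sum box kernel, as asserted above.
def _demo_box_even_width():
    import numpy as np
    from astropy.convolution import Box1DKernel
    kernel = Box1DKernel(4)
    assert kernel.shape[0] % 2 == 1
    np.testing.assert_allclose(kernel.array.sum(), 1.)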
def test_kernel_normalization(self):
"""
Test that repeated normalizations do not change the kernel [#3747].
"""
kernel = CustomKernel(np.ones(5))
kernel.normalize()
data = np.copy(kernel.array)
kernel.normalize()
assert_allclose(data, kernel.array)
kernel.normalize()
assert_allclose(data, kernel.array)
def test_kernel_normalization_mode(self):
"""
Test that an error is raised if mode is invalid.
"""
with pytest.raises(ValueError):
kernel = CustomKernel(np.ones(3))
kernel.normalize(mode='invalid')
def test_kernel1d_int_size(self):
"""
Test that an error is raised if ``Kernel1D`` ``x_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian1DKernel(3, x_size=1.2)
def test_kernel2d_int_xsize(self):
"""
Test that an error is raised if ``Kernel2D`` ``x_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian2DKernel(3, x_size=1.2)
def test_kernel2d_int_ysize(self):
"""
Test that an error is raised if ``Kernel2D`` ``y_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian2DKernel(3, x_size=5, y_size=1.2)
def test_kernel1d_initialization(self):
"""
Test that an error is raised if an array or model is not
specified for ``Kernel1D``.
"""
with pytest.raises(TypeError):
Kernel1D()
def test_kernel2d_initialization(self):
"""
Test that an error is raised if an array or model is not
specified for ``Kernel2D``.
"""
with pytest.raises(TypeError):
Kernel2D()
|
a9d56a3c4e62479316bedf1b6b618f42e29c24a8264b948b926f30f7dab85239 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.convolution.utils import discretize_model
from astropy.modeling.functional_models import (
Gaussian1D, Box1D, MexicanHat1D, Gaussian2D, Box2D, MexicanHat2D)
from astropy.modeling.tests.example_models import models_1D, models_2D
from astropy.modeling.tests.test_models import create_model
try:
import scipy # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
modes = ['center', 'linear_interp', 'oversample']
test_models_1D = [Gaussian1D, Box1D, MexicanHat1D]
test_models_2D = [Gaussian2D, Box2D, MexicanHat2D]
@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_1D, modes)))
def test_pixel_sum_1D(model_class, mode):
"""
Test if the sum of all pixels corresponds nearly to the integral.
"""
if model_class == Box1D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_1D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model, models_1D[model_class]['x_lim'], mode=mode)
assert_allclose(values.sum(), models_1D[model_class]['integral'], atol=0.0001)
@pytest.mark.parametrize('mode', modes)
def test_gaussian_eval_1D(mode):
"""
Discretize Gaussian with different modes and check
if result is at least similar to Gaussian1D.eval().
"""
model = Gaussian1D(1, 0, 20)
x = np.arange(-100, 101)
values = model(x)
disc_values = discretize_model(model, (-100, 101), mode=mode)
assert_allclose(values, disc_values, atol=0.001)
@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_2D, modes)))
def test_pixel_sum_2D(model_class, mode):
"""
Test if the sum of all pixels corresponds nearly to the integral.
"""
if model_class == Box2D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_2D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model, models_2D[model_class]['x_lim'],
models_2D[model_class]['y_lim'], mode=mode)
assert_allclose(values.sum(), models_2D[model_class]['integral'], atol=0.0001)
@pytest.mark.parametrize('mode', modes)
def test_gaussian_eval_2D(mode):
"""
Discretize Gaussian with different modes and check
if result is at least similar to Gaussian2D.eval()
"""
model = Gaussian2D(0.01, 0, 0, 1, 1)
x = np.arange(-2, 3)
y = np.arange(-2, 3)
x, y = np.meshgrid(x, y)
values = model(x, y)
disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode)
assert_allclose(values, disc_values, atol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian_eval_2D_integrate_mode():
"""
Discretize Gaussian with integrate mode
"""
model_list = [Gaussian2D(.01, 0, 0, 2, 2),
Gaussian2D(.01, 0, 0, 1, 2),
Gaussian2D(.01, 0, 0, 2, 1)]
x = np.arange(-2, 3)
y = np.arange(-2, 3)
x, y = np.meshgrid(x, y)
for model in model_list:
values = model(x, y)
disc_values = discretize_model(model, (-2, 3), (-2, 3), mode='integrate')
assert_allclose(values, disc_values, atol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_subpixel_gauss_1D():
"""
Test subpixel accuracy of the integrate mode with gaussian 1D model.
"""
gauss_1D = Gaussian1D(1, 0, 0.1)
values = discretize_model(gauss_1D, (-1, 2), mode='integrate', factor=100)
assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001)
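# (np.sqrt(2 * np.pi) * 0.1 above is the analytic integral of a unit-amplitude
# Gaussian with stddev 0.1; the 2D analogue below integrates to
# 2 * np.pi * 0.1 * 0.1 = 2 * np.pi * 0.01.)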
@pytest.mark.skipif('not HAS_SCIPY')
def test_subpixel_gauss_2D():
"""
Test subpixel accuracy of the integrate mode with gaussian 2D model.
"""
gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1)
values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode='integrate', factor=100)
assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001)
def test_discretize_callable_1d():
"""
Test discretize when a 1d function is passed.
"""
def f(x):
return x ** 2
y = discretize_model(f, (-5, 6))
assert_allclose(y, np.arange(-5, 6) ** 2)
def test_discretize_callable_2d():
"""
Test discretize when a 2d function is passed.
"""
def f(x, y):
return x ** 2 + y ** 2
actual = discretize_model(f, (-5, 6), (-5, 6))
y, x = (np.indices((11, 11)) - 5)
desired = x ** 2 + y ** 2
assert_allclose(actual, desired)
def test_type_exception():
"""
Test type exception.
"""
with pytest.raises(TypeError) as exc:
discretize_model(float(0), (-10, 11))
assert exc.value.args[0] == 'Model must be callable.'
def test_dim_exception_1d():
"""
Test dimension exception 1d.
"""
def f(x):
return x ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11), (-10, 11))
assert exc.value.args[0] == "y range specified, but model is only 1-d."
def test_dim_exception_2d():
"""
Test dimension exception 2d.
"""
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11))
assert exc.value.args[0] == "y range not specified, but model is 2-d"
def test_float_x_range_exception():
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10.002, 11.23))
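# The expected string below (and in the y_range test that follows) reproduces
# the raised message verbatim, including its "upper an lower" typo.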
assert exc.value.args[0] == ("The difference between the upper an lower"
" limit of 'x_range' must be a whole number.")
def test_float_y_range_exception():
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11), (-10.002, 11.23))
assert exc.value.args[0] == ("The difference between the upper an lower"
" limit of 'y_range' must be a whole number.")
|
efb1bb7081942fcc208413fe88cd7166f7eba9f165206f845f31940c2a8ee397 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import numpy.ma as ma
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import Gaussian2DKernel
from astropy.utils.exceptions import AstropyUserWarning
from numpy.testing import (assert_array_almost_equal_nulp,
assert_array_almost_equal,
assert_allclose)
import itertools
VALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
NANHANDLING_OPTIONS = ['interpolate', 'fill']
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]
BOUNDARIES_AND_CONVOLUTIONS = (list(zip(itertools.cycle((convolve,)),
BOUNDARY_OPTIONS)) + [(convolve_fft,
'wrap'),
(convolve_fft,
'fill')])
HAS_SCIPY = True
try:
import scipy
except ImportError:
HAS_SCIPY = False
HAS_PANDAS = True
try:
import pandas
except ImportError:
HAS_PANDAS = False
class TestConvolve1D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [1, 4, 5, 6, 5, 7, 8]
y = [0.2, 0.6, 0.2]
z = convolve(x, y, boundary=None)
assert_array_almost_equal_nulp(z,
np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)
def test_tuple(self):
"""
Test that convolve works correctly when inputs are tuples
"""
x = (1, 4, 5, 6, 5, 7, 8)
y = (0.2, 0.6, 0.2)
z = convolve(x, y, boundary=None)
assert_array_almost_equal_nulp(z,
np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve doesn't modify the input data
"""
array = [1., 4., 5., 6., 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
assert np.all(np.array(array, dtype=dtype) == x)
assert np.all(np.array(kernel, dtype=dtype) == y)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified_with_nan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve doesn't modify the input data
"""
array = [1., 4., 5., np.nan, 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
# make copies for post call comparison
x_copy = x.copy()
y_copy = y.copy()
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
# ( NaN == NaN ) = False
# Only compare non NaN values for canonical equivalence
# and then check NaN explicitly with np.isnan()
array_is_nan = np.isnan(array)
kernel_is_nan = np.isnan(kernel)
array_not_nan = ~array_is_nan
kernel_not_nan = ~kernel_is_nan
assert np.all(x_copy[array_not_nan] == x[array_not_nan])
assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
assert np.all(np.isnan(x[array_is_nan]))
assert np.all(np.isnan(y[kernel_is_nan]))
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([1., 2., 3.], dtype=dtype_array)
y = np.array([0., 1., 0.], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('convfunc', 'boundary',), BOUNDARIES_AND_CONVOLUTIONS)
def test_unity_1_none(self, boundary, convfunc):
'''
Test that a unit kernel with a single element returns the same array
'''
x = np.array([1., 2., 3.], dtype='>f8')
y = np.array([1.], dtype='>f8')
z = convfunc(x, y, boundary=boundary)
np.testing.assert_allclose(z, x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3(self, boundary):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
'''
x = np.array([1., 2., 3.], dtype='>f8')
y = np.array([0., 1., 0.], dtype='>f8')
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([0., 2., 0.], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3(self, boundary):
'''
Test that the different modes produce the correct results using
a uniform kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='>f8')
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert np.all(z == np.array([0., 4., 0.], dtype='>f8'))
elif boundary == 'fill':
assert np.all(z == np.array([1., 4., 3.], dtype='>f8'))
elif boundary == 'wrap':
assert np.all(z == np.array([4., 4., 4.], dtype='>f8'))
else:
assert np.all(z == np.array([2., 4., 6.], dtype='>f8'))
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS))
def test_unity_3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = np.array([1., np.nan, 3.], dtype='>f8')
y = np.array([0., 1., 0.], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
x = np.nan_to_num(x)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([0., 0., 0.], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS))
def test_uniform_3_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan):
'''
Test that the different modes produce the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
'''
x = np.array([1., np.nan, 3.], dtype='>f8')
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
# boundary, nan_treatment, normalize_kernel
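# e.g. ('fill', 'interpolate', True) -> [1/2., 2, 3/2.]: at the first pixel
# the zero pad and x[0] are the only valid samples (2/3 of the kernel weight),
# so the result is (0 + 1)/3 divided by 2/3 = 1/2, the NaN's weight having
# been renormalized away.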
rslt = {
(None, 'interpolate', True): [0, 2, 0],
(None, 'interpolate', False): [0, 6, 0],
(None, 'fill', True): [0, 4/3., 0],
(None, 'fill', False): [0, 4, 0],
('fill', 'interpolate', True): [1/2., 2, 3/2.],
('fill', 'interpolate', False): [3/2., 6, 9/2.],
('fill', 'fill', True): [1/3., 4/3., 3/3.],
('fill', 'fill', False): [1, 4, 3],
('wrap', 'interpolate', True): [2, 2, 2],
('wrap', 'interpolate', False): [6, 6, 6],
('wrap', 'fill', True): [4/3., 4/3., 4/3.],
('wrap', 'fill', False): [4, 4, 4],
('extend', 'interpolate', True): [1, 2, 3],
('extend', 'interpolate', False): [3, 6, 9],
('extend', 'fill', True): [2/3., 4/3., 6/3.],
('extend', 'fill', False): [2, 4, 6],
}[boundary, nan_treatment, normalize_kernel]
if preserve_nan:
rslt[1] = 0
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_zero_sum_kernel(self, boundary, normalize_kernel):
"""
Test that convolve works correctly with zero sum kernels.
"""
if normalize_kernel:
pytest.xfail("You can't normalize by a zero sum kernel")
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = [-1, -1, -1, -1, 8, -1, -1, -1, -1]
assert np.isclose(sum(y), 0, atol=1e-8)
z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)
# boundary, normalize_kernel == False
rslt = {
(None): [0., 0., 0., 0., 0., 0., 0., 0., 0.],
('fill'): [-6., -3., -1., 0., 0., 10., 21., 33., 46.],
('wrap'): [-36., -27., -18., -9., 0., 9., 18., 27., 36.],
('extend'): [-10., -6., -3., -1., 0., 1., 3., 6., 10.]
}[boundary]
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_int_masked_kernel(self, boundary, normalize_kernel):
"""
Test that convolve works correctly with integer masked kernels.
"""
if normalize_kernel:
pytest.xfail("You can't normalize by a zero sum kernel")
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = ma.array([-1, -1, -1, -1, 8, -1, -1, -1, -1], mask=[1, 0, 0, 0, 0, 0, 0, 0, 0], fill_value=0.)
z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)
# boundary, normalize_kernel == False
rslt = {
(None): [0., 0., 0., 0., 9., 0., 0., 0., 0.],
('fill'): [-1., 3., 6., 8., 9., 10., 21., 33., 46.],
('wrap'): [-31., -21., -11., -1., 9., 10., 20., 30., 40.],
('extend'): [-5., 0., 4., 7., 9., 10., 12., 15., 19.]
}[boundary]
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize('preserve_nan', PRESERVE_NAN_OPTIONS)
def test_int_masked_array(self, preserve_nan):
"""
Test that convolve works correctly with integer masked arrays.
"""
x = ma.array([3, 5, 7, 11, 13], mask=[0, 0, 1, 0, 0], fill_value=0.)
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[2])
z[2] = 8
assert_array_almost_equal_nulp(z, (8/3., 4, 8, 12, 8), 10)
class TestConvolve2D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=True)
assert_array_almost_equal_nulp(z, x, 10)
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=dtype_array)
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_1x1_none(self, boundary):
'''
Test that a 1x1 unit kernel returns the same array
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3(self, boundary):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([[0., 0., 0.],
[0., 5., 0.],
[0., 0., 0.]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3(self, boundary):
'''
Test that the different modes produce the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[0., 0., 3.],
[1., 0., 0.],
[0., 2., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 6., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='>f8'), 10)
else:
assert_array_almost_equal_nulp(z, np.array([[2., 7., 12.],
[4., 6., 8.],
[6., 5., 4.]], dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3_withnan(self, boundary):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[1., 2., 3.],
[4., np.nan, 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
preserve_nan=True)
assert np.isnan(z[1, 1])
        x = np.nan_to_num(x)
        z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3_withnanfilled(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 4.],
[1., np.nan, 0.],
[0., 3., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 8., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., 5., 4.],
[4., 8., 7.],
[4., 4., 3.]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[8., 8., 8.],
[8., 8., 8.],
[8., 8., 8.]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2., 9., 16.],
[5., 8., 11.],
[8., 7., 6.]], dtype='>f8'), 10)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3_withnaninterped(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 4.],
[1., np.nan, 0.],
[0., 3., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=True)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1./8, 5./8, 4./8],
[4./8, 8./8, 7./8],
[4./8, 4./8, 3./8]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2./8, 9./8, 16./8],
[5./8, 8./8, 11./8],
[8./8, 7./8, 6./8]], dtype='>f8'), 10)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_normalized_kernel_2D(self, boundary):
x = np.array([[0., 0., 4.],
[1., 2., 0.],
[0., 3., 0.]], dtype='float')
y = np.array([[1., -1., 1.],
[-1., 0., -1.],
[1., -1., 1.]], dtype='float')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype='float'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., -5., 2.],
[1., 0., -3.],
[-2., -1., -1.]], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[0., -8., 6.],
[5., 0., -4.],
[2., 3., -4.]], dtype='float'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2., -1., -2.],
[0., 0., 1.],
[2., -4., 2.]], dtype='float'), 10)
else:
raise ValueError("Invalid boundary specification")
class TestConvolve3D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]]
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z / 27, x, 10)
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=dtype_array)
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_1x1x1_none(self, boundary):
'''
Test that a 1x1x1 unit kernel returns the same array
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 0., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.array([[[1.]]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3x3(self, boundary):
'''
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.zeros((3, 3, 3), dtype='>f8')
y[1, 1, 1] = 1.
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 3., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3(self, boundary):
'''
Test that the different modes are producing the correct results using
        a 3x3x3 uniform kernel.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 81., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[23., 28., 16.], [35., 46., 25.], [25., 34., 18.]],
[[40., 50., 23.], [63., 81., 36.], [46., 60., 27.]],
[[32., 40., 16.], [50., 61., 22.], [36., 44., 16.]]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]]], dtype='>f8'), 10)
else:
assert_array_almost_equal_nulp(z, np.array([[[65., 54., 43.], [75., 66., 57.], [85., 78., 71.]],
[[96., 71., 46.], [108., 81., 54.], [120., 91., 62.]],
[[127., 88., 49.], [141., 96., 51.], [155., 104., 53.]]], dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS))
def test_unity_3x3x3_withnan(self, boundary, nan_treatment):
'''
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.zeros((3, 3, 3), dtype='>f8')
y[1, 1, 1] = 1.
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
preserve_nan=True)
assert np.isnan(z[1, 1, 1])
        x = np.nan_to_num(x)
        z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_filled(self, boundary):
'''
Test that the different modes are producing the correct results using
        a 3x3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
[32., 43., 22.],
[22., 31., 15.]],
[[37., 47., 20.],
[60., 78., 33.],
[43., 57., 24.]],
[[29., 37., 13.],
[47., 58., 19.],
[33., 41., 13.]]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
[72., 63., 54.],
[82., 75., 68.]],
[[93., 68., 43.],
[105., 78., 51.],
[117., 88., 59.]],
[[124., 85., 46.],
[138., 93., 48.],
[152., 101., 50.]]],
dtype='>f8'), 10)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_interped(self, boundary):
'''
Test that the different modes are producing the correct results using
        a 3x3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=True)
kernsum = y.sum() - 1 # one nan is missing
mid = x[np.isfinite(x)].sum() / kernsum
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
dtype='>f8')/kernsum, 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
[32., 43., 22.],
[22., 31., 15.]],
[[37., 47., 20.],
[60., 78., 33.],
[43., 57., 24.]],
[[29., 37., 13.],
[47., 58., 19.],
[33., 41., 13.]]],
dtype='>f8')/kernsum, 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.tile(mid.astype('>f8'), [3, 3, 3]), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
[72., 63., 54.],
[82., 75., 68.]],
[[93., 68., 43.],
[105., 78., 51.],
[117., 88., 59.]],
[[124., 85., 46.],
[138., 93., 48.],
[152., 101., 50.]]],
dtype='>f8')/kernsum, 10)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
'''
Regression test for #6264: make sure that asymmetric convolution
functions go the right direction
'''
x = np.array([3., 0., 1.], dtype='>f8')
y = np.array([1, 2, 3], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
elif boundary is None:
assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)
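    # Sanity check (illustrative): true convolution flips the kernel, so with
    # zero padding this matches np.convolve([3, 0, 1], [1, 2, 3], 'same'),
    # which gives [6., 10., 2.] -- the boundary='fill' expectation above.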
@pytest.mark.parametrize('ndims', (1, 2, 3))
def test_convolution_consistency(ndims):
np.random.seed(0)
array = np.random.randn(*([3]*ndims))
np.random.seed(0)
kernel = np.random.rand(*([3]*ndims))
conv_f = convolve_fft(array, kernel, boundary='fill')
conv_d = convolve(array, kernel, boundary='fill')
assert_array_almost_equal_nulp(conv_f, conv_d, 30)
def test_astropy_convolution_against_numpy():
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(np.convolve(y, x, 'same'),
convolve(y, x, normalize_kernel=False))
assert_array_almost_equal(np.convolve(y, x, 'same'),
convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_SCIPY')
def test_astropy_convolution_against_scipy():
from scipy.signal import fftconvolve
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(fftconvolve(y, x, 'same'),
convolve(y, x, normalize_kernel=False))
assert_array_almost_equal(fftconvolve(y, x, 'same'),
convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_PANDAS')
def test_regression_6099():
wave = np.array((np.linspace(5000, 5100, 10)))
boxcar = 3
nonseries_result = convolve(wave, np.ones((boxcar,))/boxcar)
wave_series = pandas.Series(wave)
series_result = convolve(wave_series, np.ones((boxcar,))/boxcar)
assert_array_almost_equal(nonseries_result, series_result)
def test_invalid_array_convolve():
kernel = np.ones(3)/3.
with pytest.raises(TypeError):
convolve('glork', kernel)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_square_kernel_asymmetric(boundary):
# Regression test for a bug that occurred when using non-square kernels in
# 2D when using boundary=None
kernel = np.array([[1, 2, 3, 2, 1], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]])
image = np.zeros((13, 13))
image[6, 6] = 1
result = convolve(image, kernel, normalize_kernel=False, boundary=boundary)
assert_allclose(result[5:8, 4:9], kernel)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_uninterpolated_nan_regions(boundary, normalize_kernel):
    # Regression test for #8086
# Test NaN interpolation of contiguous NaN regions with kernels of size
# identical and greater than that of the region of NaN values.
# Test case: kernel.shape == NaN_region.shape
kernel = Gaussian2DKernel(1, 5, 5)
nan_centroid = np.full(kernel.shape, np.nan)
image = np.pad(nan_centroid, pad_width=kernel.shape[0]*2, mode='constant',
constant_values=1)
with pytest.warns(AstropyUserWarning,
match="nan_treatment='interpolate', however, NaN values detected "
"post convolution. A contiguous region of NaN values, larger "
"than the kernel size, are present in the input array. "
"Increase the kernel size to avoid this."):
result = convolve(image, kernel, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=normalize_kernel)
    assert np.any(np.isnan(result))
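    # The NaN region is exactly as large as the kernel, so at the region's
    # centre no finite samples fall under the kernel footprint; interpolation
    # cannot remove those NaNs, hence the warning and the residual NaNs above.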
# Test case: kernel.shape > NaN_region.shape
    nan_centroid = np.full((kernel.shape[0]-1, kernel.shape[1]-1), np.nan)  # 1 smaller than kernel
image = np.pad(nan_centroid, pad_width=kernel.shape[0]*2, mode='constant',
constant_values=1)
result = convolve(image, kernel, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=normalize_kernel)
    assert not np.any(np.isnan(result))  # all NaNs should have been interpolated away
|
80ca7e5d2f38dbeaad27ba5b5553b4c683ee50c065a387d1a4cbd4627f3abee4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import numpy as np
import pytest
from astropy.convolution.convolve import convolve, convolve_fft, convolve_models
from astropy.modeling import models, fitting
from astropy.utils.misc import NumpyRNGContext
from numpy.testing import assert_allclose, assert_almost_equal
try:
import scipy
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
class TestConvolve1DModels:
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
@pytest.mark.skipif('not HAS_SCIPY')
def test_is_consistency_with_astropy_convolution(self, mode):
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode)
x = np.arange(-5, 6)
ans = eval("{}(model(x), kernel(x))".format(mode))
assert_allclose(ans, model_conv(x), atol=1e-5)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
@pytest.mark.skipif('not HAS_SCIPY')
def test_against_scipy(self, mode):
from scipy.signal import fftconvolve
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode)
x = np.arange(-5, 6)
ans = fftconvolve(kernel(x), model(x), mode='same')
assert_allclose(ans, model_conv(x) * kernel(x).sum(), atol=1e-5)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
@pytest.mark.skipif('not HAS_SCIPY')
def test_against_scipy_with_additional_keywords(self, mode):
from scipy.signal import fftconvolve
kernel = models.Gaussian1D(1, 0, 1)
model = models.Gaussian1D(1, 0, 1)
model_conv = convolve_models(model, kernel, mode=mode,
normalize_kernel=False)
x = np.arange(-5, 6)
ans = fftconvolve(kernel(x), model(x), mode='same')
assert_allclose(ans, model_conv(x), atol=1e-5)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
def test_sum_of_gaussians(self, mode):
"""
Test that convolving N(a, b) with N(c, d) gives N(a + c, b + d),
        where N(., .) denotes the Gaussian probability density function,
        with a and c the means and b and d the variances.
"""
kernel = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 1, 1)
model = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 3, 1)
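        # Worked identity (illustrative): since
        # int N(t; a, b) N(x - t; c, d) dt = N(x; a + c, b + d), the two
        # unit-area Gaussians above (means 1 and 3, unit variances) convolve
        # to mean 4 and variance 2, i.e. stddev sqrt(2) -- the `ans` model.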
model_conv = convolve_models(model, kernel, mode=mode,
normalize_kernel=False)
ans = models.Gaussian1D(1 / (2 * math.sqrt(np.pi)), 4, np.sqrt(2))
x = np.arange(-5, 6)
assert_allclose(ans(x), model_conv(x), atol=1e-3)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
def test_convolve_box_models(self, mode):
kernel = models.Box1D()
model = models.Box1D()
model_conv = convolve_models(model, kernel, mode=mode)
x = np.linspace(-1, 1, 99)
ans = (x + 1) * (x < 0) + (-x + 1) * (x >= 0)
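        # `ans` is the unit triangle: convolving two unit-width, unit-height
        # boxes yields a triangle of half-width 1 that peaks at 1 for x = 0.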
assert_allclose(ans, model_conv(x), atol=1e-3)
@pytest.mark.parametrize('mode', ['convolve_fft', 'convolve'])
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_convolve_models(self, mode):
"""
test that a convolve model can be fitted
"""
b1 = models.Box1D()
g1 = models.Gaussian1D()
x = np.linspace(-5, 5, 99)
fake_model = models.Gaussian1D(amplitude=10)
with NumpyRNGContext(123):
fake_data = fake_model(x) + np.random.normal(size=len(x))
init_model = convolve_models(b1, g1, mode=mode, normalize_kernel=False)
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, fake_data)
me = np.mean(fitted_model(x) - fake_data)
assert_almost_equal(me, 0.0, decimal=2)
|
37fe265a617b78473dbeb5531df305b116b0379fc79ad1401f2fc239f6042c5e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import timeit
import numpy as np # pylint: disable=W0611
# largest image size to use for "linear" and fft convolutions
max_exponents_linear = {1: 15, 2: 7, 3: 5}
max_exponents_fft = {1: 15, 2: 10, 3: 7}
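# Benchmarking pattern used below: arrays are created in a timeit `setup`
# string and each convolution statement is timed with
# min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)),
# i.e. the best total wall-clock time over 3 repeats of 10 runs each.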
if __name__ == "__main__":
for ndims in [1, 2, 3]:
print("\n{}-dimensional arrays ('n' is the size of the image AND "
"the kernel)".format(ndims))
print(" ".join(["%17s" % n for n in ("n", "convolve", "convolve_fft")]))
for ii in range(3, max_exponents_fft[ndims]):
# array = np.random.random([2**ii]*ndims)
# test ODD sizes too
if ii < max_exponents_fft[ndims]:
setup = ("""
import numpy as np
from astropy.convolution.convolve import convolve
from astropy.convolution.convolve import convolve_fft
array = np.random.random([%i]*%i)
kernel = np.random.random([%i]*%i)""") % (2 ** ii - 1, ndims, 2 ** ii - 1, ndims)
print("%16i:" % (int(2 ** ii - 1)), end=' ')
if ii <= max_exponents_linear[ndims]:
for convolve_type, extra in zip(("", "_fft"),
("", "fft_pad=False")):
statement = "convolve{}(array, kernel, boundary='fill', {})".format(convolve_type, extra)
besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10))
print("%17f" % (besttime), end=' ')
else:
print("%17s" % "skipped", end=' ')
statement = "convolve_fft(array, kernel, boundary='fill')"
besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10))
print("%17f" % (besttime), end=' ')
print()
setup = ("""
import numpy as np
from astropy.convolution.convolve import convolve
from astropy.convolution.convolve import convolve_fft
array = np.random.random([%i]*%i)
kernel = np.random.random([%i]*%i)""") % (2 ** ii, ndims, 2 ** ii, ndims)
print("%16i:" % (int(2 ** ii)), end=' ')
if ii <= max_exponents_linear[ndims]:
for convolve_type in ("", "_fft",):
# convolve doesn't allow even-sized kernels
if convolve_type == "":
print("%17s" % ("-"), end=' ')
else:
statement = "convolve{}(array, kernel, boundary='fill')".format(convolve_type)
besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10))
print("%17f" % (besttime), end=' ')
else:
print("%17s" % "skipped", end=' ')
statement = "convolve_fft(array, kernel, boundary='fill')"
besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10))
print("%17f" % (besttime), end=' ')
print()
"""
Unfortunately, these benchmarks are largely inconclusive
NOTE: Runtime has units seconds and represents wall clock time.
RESULTS on a late 2013 Mac Pro:
3.5 GHz 6-Core Intel Xeon E5
32 GB 1866 MHz DDR3 ECC
Python 3.5.4 :: Anaconda custom (x86_64)
clang version 6.0.0 (tags/RELEASE_600/final)
llvm-openmp r327556 | grokos | 2018-03-14 15:11:36 -0400 (Wed, 14 Mar 2018)
With OpenMP (hyperthreaded 12procs), convolve() only:
1-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fft
7: 0.002895 0.007321
15: 0.002684 0.008028
31: 0.002733 0.008684
63: 0.002728 0.009127
127: 0.002851 0.012659
255: 0.002835 0.010550
511: 0.003051 0.017137
1023: 0.004042 0.019384
2047: 0.007371 0.049246
4095: 0.021903 0.039821
8191: 0.067098 8.335749
16383: 0.256072 0.272165
2-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fft
7: 0.002696 0.014745
15: 0.002839 0.014826
31: 0.004286 0.045167
63: 0.022941 0.063715
127: 0.325557 0.925577
255: skipped 0.694621
511: skipped 3.734946
3-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fft
7: 0.003502 0.033121
8: 0.003407 0.030351
15: 0.026338 0.062235
31: 1.239503 1.586930
63: skipped 10.792675
With OpenMP but single threaded (n_threads = 1), convolve() only:
1-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fft
7: 0.001754 0.004687
15: 0.001706 0.005133
31: 0.001744 0.005381
63: 0.001725 0.005582
127: 0.001801 0.007405
255: 0.002262 0.006528
511: 0.003866 0.009913
1023: 0.009820 0.011511
2047: 0.034707 0.028171
4095: 0.132908 0.024133
8191: 0.527692 8.311933
16383: 2.103046 0.269368
2-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fft
7: 0.001734 0.009458
15: 0.002336 0.010310
31: 0.009123 0.025427
63: 0.126701 0.040610
127: 2.126114 0.926549
255: skipped 0.690896
511: skipped 3.756475
3-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fft
7: 0.002822 0.019498
15: 0.096008 0.063744
31: 7.373533 1.578913
63: skipped 10.811530
RESULTS on a 2011 Mac Air:
1-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fftnp convolve_fftw convolve_fftsp
7: 0.000408 0.002334 0.005571 0.002677
15: 0.000361 0.002491 0.005648 0.002678
31: 0.000535 0.002450 0.005988 0.002880
63: 0.000509 0.002876 0.008003 0.002981
127: 0.000801 0.004080 0.008513 0.003932
255: 0.002453 0.003111 0.007518 0.003564
511: 0.008394 0.006224 0.010247 0.005991
1023: 0.028741 0.007538 0.009591 0.007696
2047: 0.106323 0.021575 0.022041 0.020682
4095: 0.411936 0.021675 0.019761 0.020939
8191: 1.664517 8.278320 0.073001 7.803563
16383: 6.654678 0.251661 0.202271 0.222171
2-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fftnp convolve_fftw convolve_fftsp
7: 0.000552 0.003524 0.006667 0.004318
15: 0.002986 0.005093 0.012941 0.005951
31: 0.074360 0.033973 0.031800 0.036937
63: 0.848471 0.057407 0.052192 0.053213
127: 14.656414 1.005329 0.402113 0.955279
255: skipped 1.715546 1.566876 1.745338
511: skipped 4.066155 4.303350 3.930661
3-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fftnp convolve_fftw convolve_fftsp
7: 0.009239 0.012957 0.011957 0.015997
15: 0.772434 0.075621 0.056711 0.079508
31: 62.824051 2.295193 1.189505 2.351136
63: skipped 11.250225 10.982726 10.585744
On a 2009 Mac Pro:
1-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fftnp convolve_fftw convolve_fftsp
7: 0.000360 0.002269 0.004986 0.002476
15: 0.000364 0.002255 0.005244 0.002471
31: 0.000385 0.002380 0.005422 0.002588
63: 0.000474 0.002407 0.005392 0.002637
127: 0.000752 0.004122 0.007827 0.003966
255: 0.004316 0.003258 0.006566 0.003324
511: 0.011517 0.007158 0.009898 0.006238
1023: 0.034105 0.009211 0.009468 0.008260
2047: 0.113620 0.028097 0.020662 0.021603
4095: 0.403373 0.023211 0.018767 0.020065
8191: 1.519329 8.454573 0.211436 7.212381
16383: 5.887481 0.317428 0.153344 0.237119
2-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fftnp convolve_fftw convolve_fftsp
7: 0.000474 0.003470 0.006131 0.003503
15: 0.002011 0.004481 0.007825 0.004496
31: 0.027291 0.019433 0.014841 0.018034
63: 0.445680 0.038171 0.026753 0.037404
127: 7.003774 0.925921 0.282591 0.762671
255: skipped 0.804682 0.708849 0.869368
511: skipped 3.643626 3.687562 4.584770
3-dimensional arrays ('n' is the size of the image AND the kernel)
n convolve convolve_fftnp convolve_fftw convolve_fftsp
7: 0.004520 0.011519 0.009464 0.012335
15: 0.329566 0.060978 0.045495 0.073692
31: 24.935228 1.654920 0.710509 1.773879
63: skipped 8.982771 12.407683 16.900078
"""
|
0956f7da0f21f84dc12b0ae93c5945593963d95ea2619a83a987348930eb5821 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
import subprocess
import pytest
from astropy.tests.helper import catch_warnings
from astropy.utils.data import get_pkg_data_filename
from astropy.config import configuration
from astropy.config import paths
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_paths():
assert 'astropy' in paths.get_config_dir()
assert 'astropy' in paths.get_cache_dir()
def test_set_temp_config(tmpdir, monkeypatch):
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
orig_config_dir = paths.get_config_dir()
temp_config_dir = str(tmpdir.mkdir('config'))
temp_astropy_config = os.path.join(temp_config_dir, 'astropy')
# Test decorator mode
@paths.set_temp_config(temp_config_dir)
def test_func():
assert paths.get_config_dir() == temp_astropy_config
# Test temporary restoration of original default
with paths.set_temp_config() as d:
assert d == orig_config_dir == paths.get_config_dir()
test_func()
# Test context manager mode (with cleanup)
with paths.set_temp_config(temp_config_dir, delete=True):
assert paths.get_config_dir() == temp_astropy_config
assert not os.path.exists(temp_config_dir)
def test_set_temp_cache(tmpdir, monkeypatch):
monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None)
orig_cache_dir = paths.get_cache_dir()
temp_cache_dir = str(tmpdir.mkdir('cache'))
temp_astropy_cache = os.path.join(temp_cache_dir, 'astropy')
# Test decorator mode
@paths.set_temp_cache(temp_cache_dir)
def test_func():
assert paths.get_cache_dir() == temp_astropy_cache
# Test temporary restoration of original default
with paths.set_temp_cache() as d:
assert d == orig_cache_dir == paths.get_cache_dir()
test_func()
# Test context manager mode (with cleanup)
with paths.set_temp_cache(temp_cache_dir, delete=True):
assert paths.get_cache_dir() == temp_astropy_cache
assert not os.path.exists(temp_cache_dir)
def test_config_file():
from astropy.config.configuration import get_config, reload_config
apycfg = get_config('astropy')
assert apycfg.filename.endswith('astropy.cfg')
cfgsec = get_config('astropy.config')
assert cfgsec.depth == 1
assert cfgsec.name == 'config'
assert cfgsec.parent.filename.endswith('astropy.cfg')
reload_config('astropy')
def test_configitem():
from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config
ci = ConfigItem(34, 'this is a Description')
class Conf(ConfigNamespace):
tstnm = ci
conf = Conf()
assert ci.module == 'astropy.config.tests.test_configs'
assert ci() == 34
assert ci.description == 'this is a Description'
assert conf.tstnm == 34
sec = get_config(ci.module)
assert sec['tstnm'] == 34
ci.description = 'updated Descr'
ci.set(32)
assert ci() == 32
# It's useful to go back to the default to allow other test functions to
# call this one and still be in the default configuration.
ci.description = 'this is a Description'
ci.set(34)
assert ci() == 34
def test_configitem_types():
from astropy.config.configuration import ConfigNamespace, ConfigItem
cio = ConfigItem(['op1', 'op2', 'op3'])
class Conf(ConfigNamespace):
tstnm1 = ConfigItem(34)
tstnm2 = ConfigItem(34.3)
tstnm3 = ConfigItem(True)
tstnm4 = ConfigItem('astring')
conf = Conf()
assert isinstance(conf.tstnm1, int)
assert isinstance(conf.tstnm2, float)
assert isinstance(conf.tstnm3, bool)
assert isinstance(conf.tstnm4, str)
with pytest.raises(TypeError):
conf.tstnm1 = 34.3
    conf.tstnm2 = 12  # this should succeed via up-casting
with pytest.raises(TypeError):
conf.tstnm3 = 'fasd'
with pytest.raises(TypeError):
conf.tstnm4 = 546.245
def test_configitem_options(tmpdir):
from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config
cio = ConfigItem(['op1', 'op2', 'op3'])
class Conf(ConfigNamespace):
tstnmo = cio
conf = Conf()
sec = get_config(cio.module)
assert isinstance(cio(), str)
assert cio() == 'op1'
assert sec['tstnmo'] == 'op1'
cio.set('op2')
with pytest.raises(TypeError):
cio.set('op5')
assert sec['tstnmo'] == 'op2'
# now try saving
apycfg = sec
while apycfg.parent is not apycfg:
apycfg = apycfg.parent
f = tmpdir.join('astropy.cfg')
with open(f.strpath, 'wb') as fd:
apycfg.write(fd)
with open(f.strpath, 'r', encoding='utf-8') as fd:
        lns = [x.strip() for x in fd.readlines()]
assert 'tstnmo = op2' in lns
def test_config_noastropy_fallback(monkeypatch):
"""
Tests to make sure configuration items fall back to their defaults when
there's a problem accessing the astropy directory
"""
# make sure the config directory is not searched
monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo')
monkeypatch.delenv(str('XDG_CONFIG_HOME'))
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
# make sure the _find_or_create_astropy_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto):
raise OSError
monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser)
# also have to make sure the stored configuration objects are cleared
monkeypatch.setattr(configuration, '_cfgobjs', {})
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_config_dir()
# now run the basic tests, and make sure the warning about no astropy
# is present
with catch_warnings(configuration.ConfigurationMissingWarning) as w:
test_configitem()
assert len(w) == 1
w = w[0]
assert 'Configuration defaults will be used' in str(w.message)
def test_configitem_setters():
from astropy.config.configuration import ConfigNamespace, ConfigItem
class Conf(ConfigNamespace):
tstnm12 = ConfigItem(42, 'this is another Description')
conf = Conf()
assert conf.tstnm12 == 42
with conf.set_temp('tstnm12', 45):
assert conf.tstnm12 == 45
assert conf.tstnm12 == 42
conf.tstnm12 = 43
assert conf.tstnm12 == 43
with conf.set_temp('tstnm12', 46):
assert conf.tstnm12 == 46
# Make sure it is reset even with Exception
try:
with conf.set_temp('tstnm12', 47):
raise Exception
except Exception:
pass
assert conf.tstnm12 == 43
def test_empty_config_file():
from astropy.config.configuration import is_unedited_config_file
def get_content(fn):
with open(get_pkg_data_filename(fn), 'rt', encoding='latin-1') as fd:
return fd.read()
content = get_content('data/empty.cfg')
assert is_unedited_config_file(content)
content = get_content('data/not_empty.cfg')
assert not is_unedited_config_file(content)
content = get_content('data/astropy.0.3.cfg')
assert is_unedited_config_file(content)
content = get_content('data/astropy.0.3.windows.cfg')
assert is_unedited_config_file(content)
class TestAliasRead:
def setup_class(self):
configuration._override_config_file = get_pkg_data_filename('data/alias.cfg')
def test_alias_read(self):
from astropy.utils.data import conf
with catch_warnings() as w:
conf.reload()
assert conf.remote_timeout == 42
assert len(w) == 1
assert str(w[0].message).startswith(
"Config parameter 'name_resolve_timeout' in section "
"[coordinates.name_resolve]")
def teardown_class(self):
from astropy.utils.data import conf
configuration._override_config_file = None
conf.reload()
def test_configitem_unicode(tmpdir):
from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config
cio = ConfigItem('ასტრონომიის')
class Conf(ConfigNamespace):
tstunicode = cio
conf = Conf()
sec = get_config(cio.module)
assert isinstance(cio(), str)
assert cio() == 'ასტრონომიის'
assert sec['tstunicode'] == 'ასტრონომიის'
def test_warning_move_to_top_level():
# Check that the warning about deprecation config items in the
# file works. See #2514
from astropy import conf
configuration._override_config_file = get_pkg_data_filename('data/deprecated.cfg')
try:
with catch_warnings(AstropyDeprecationWarning) as w:
conf.reload()
conf.max_lines
assert len(w) == 1
finally:
configuration._override_config_file = None
conf.reload()
def test_no_home():
    # "import astropy" fails when neither $HOME nor $XDG_CONFIG_HOME
    # is set.  To test, we unset those environment variables for a
# subprocess and try to import astropy.
test_path = os.path.dirname(__file__)
astropy_path = os.path.abspath(
os.path.join(test_path, '..', '..', '..'))
env = os.environ.copy()
paths = [astropy_path]
if env.get('PYTHONPATH'):
paths.append(env.get('PYTHONPATH'))
env[str('PYTHONPATH')] = str(os.pathsep.join(paths))
for val in ['HOME', 'XDG_CONFIG_HOME']:
if val in env:
del env[val]
retcode = subprocess.check_call(
[sys.executable, '-c', 'import astropy'],
env=env)
assert retcode == 0
def test_unedited_template():
# Test that the config file is written at most once
config_dir = os.path.join(os.path.dirname(__file__), '..', '..')
configuration.update_default_config('astropy', config_dir)
assert configuration.update_default_config('astropy', config_dir) is False
|
9671822ba805b790c35c9982a1e0dbe68f1b4d9f91e6b399267959190ae35931 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import numpy as np
from astropy import units as u
from astropy.uncertainty.core import Distribution
from astropy.uncertainty import distributions as ds
from astropy.utils import NumpyRNGContext
from astropy.tests.helper import assert_quantity_allclose, pytest
try:
from scipy.stats import norm # pylint: disable=W0611
SMAD_FACTOR = 1 / norm.ppf(0.75)
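    # 1 / Phi^{-1}(0.75) ~= 1.4826 rescales the median absolute deviation
    # into a consistent estimator of the Gaussian standard deviation.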
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_numpy_init():
# Test that we can initialize directly from a Numpy array
rates = np.array([1, 5, 30, 400])[:, np.newaxis]
parr = np.random.poisson(rates, (4, 1000))
Distribution(parr)
def test_numpy_init_T():
rates = np.array([1, 5, 30, 400])
parr = np.random.poisson(rates, (1000, 4))
Distribution(parr.T)
def test_quantity_init():
# Test that we can initialize directly from a Quantity
pq = np.random.poisson(np.array([1, 5, 30, 400])[:, np.newaxis],
(4, 1000)) * u.ct
Distribution(pq)
def test_quantity_init_T():
# Test that we can initialize directly from a Quantity
pq = np.random.poisson(np.array([1, 5, 30, 400]), (1000, 4)) * u.ct
Distribution(pq.T)
def test_init_scalar():
parr = np.random.poisson(np.array([1, 5, 30, 400])[:, np.newaxis],
(4, 1000))
with pytest.raises(TypeError) as exc:
Distribution(parr.ravel()[0])
assert exc.value.args[0] == "Attempted to initialize a Distribution with a scalar"
class TestDistributionStatistics():
def setup_class(self):
with NumpyRNGContext(12345):
self.data = np.random.normal(np.array([1, 2, 3, 4])[:, np.newaxis],
np.array([3, 2, 4, 5])[:, np.newaxis],
(4, 10000))
self.distr = Distribution(self.data * u.kpc)
def test_shape(self):
# Distribution shape
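        # Convention exercised here: the trailing axis of the underlying
        # array holds the samples, so a (4, 10000) array behaves as a
        # shape-(4,) Distribution with n_samples == 10000.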
assert self.distr.shape == (4, )
assert self.distr.distribution.shape == (4, 10000)
def test_size(self):
# Total number of values
assert self.distr.size == 4
assert self.distr.distribution.size == 40000
def test_n_samples(self):
# Number of samples
assert self.distr.n_samples == 10000
def test_n_distr(self):
assert self.distr.shape == (4,)
def test_pdf_mean(self):
# Mean of each PDF
expected = np.mean(self.data, axis=-1) * self.distr.unit
assert_quantity_allclose(self.distr.pdf_mean, expected)
assert_quantity_allclose(self.distr.pdf_mean, [1, 2, 3, 4] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(self.distr.pdf_mean, Distribution)
assert isinstance(self.distr.pdf_mean, u.Quantity)
def test_pdf_std(self):
# Standard deviation of each PDF
expected = np.std(self.data, axis=-1) * self.distr.unit
assert_quantity_allclose(self.distr.pdf_std, expected)
assert_quantity_allclose(self.distr.pdf_std, [3, 2, 4, 5] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(self.distr.pdf_std, Distribution)
assert isinstance(self.distr.pdf_std, u.Quantity)
def test_pdf_var(self):
# Variance of each PDF
expected = np.var(self.data, axis=-1) * self.distr.unit**2
assert_quantity_allclose(self.distr.pdf_var, expected)
assert_quantity_allclose(self.distr.pdf_var, [9, 4, 16, 25] * self.distr.unit**2, rtol=0.1)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(self.distr.pdf_var, Distribution)
assert isinstance(self.distr.pdf_var, u.Quantity)
def test_pdf_median(self):
# Median of each PDF
expected = np.median(self.data, axis=-1) * self.distr.unit
assert_quantity_allclose(self.distr.pdf_median, expected)
assert_quantity_allclose(self.distr.pdf_median, [1, 2, 3, 4] * self.distr.unit, rtol=0.1)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(self.distr.pdf_median, Distribution)
assert isinstance(self.distr.pdf_median, u.Quantity)
@pytest.mark.skipif(not HAS_SCIPY, reason='no scipy')
def test_pdf_mad_smad(self):
# Median absolute deviation of each PDF
median = np.median(self.data, axis=-1, keepdims=True)
expected = np.median(np.abs(self.data - median), axis=-1) * self.distr.unit
assert_quantity_allclose(self.distr.pdf_mad, expected)
assert_quantity_allclose(self.distr.pdf_smad, self.distr.pdf_mad * SMAD_FACTOR, rtol=1e-5)
assert_quantity_allclose(self.distr.pdf_smad, [3, 2, 4, 5] * self.distr.unit, rtol=0.05)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(self.distr.pdf_mad, Distribution)
assert isinstance(self.distr.pdf_mad, u.Quantity)
assert not isinstance(self.distr.pdf_smad, Distribution)
assert isinstance(self.distr.pdf_smad, u.Quantity)
def test_percentile(self):
expected = np.percentile(self.data, [10, 50, 90], axis=-1) * self.distr.unit
percs = self.distr.pdf_percentiles([10, 50, 90])
assert_quantity_allclose(percs, expected)
assert percs.shape == (3, 4)
# make sure the right type comes out - should be a Quantity because it's
# now a summary statistic
assert not isinstance(percs, Distribution)
assert isinstance(percs, u.Quantity)
def test_add_quantity(self):
distrplus = self.distr + [2000, 0, 0, 500] * u.pc
expected = (np.median(self.data, axis=-1) + np.array([2, 0, 0, 0.5])) * self.distr.unit
assert_quantity_allclose(distrplus.pdf_median, expected)
expected = np.var(self.data, axis=-1) * self.distr.unit**2
assert_quantity_allclose(distrplus.pdf_var, expected)
def test_add_distribution(self):
another_data = (np.random.randn(4, 10000)
* np.array([1000, .01, 80, 10])[:, np.newaxis]
+ np.array([2000, 0, 0, 500])[:, np.newaxis])
# another_data is in pc, but main distr is in kpc
another_distr = Distribution(another_data * u.pc)
combined_distr = self.distr + another_distr
expected = np.median(self.data + another_data/1000,
axis=-1) * self.distr.unit
assert_quantity_allclose(combined_distr.pdf_median, expected)
expected = np.var(self.data + another_data/1000, axis=-1) * self.distr.unit**2
assert_quantity_allclose(combined_distr.pdf_var, expected)
def test_helper_normal_samples():
centerq = [1, 5, 30, 400] * u.kpc
with NumpyRNGContext(12345):
n_dist = ds.normal(centerq, std=[0.2, 1.5, 4, 1]*u.kpc, n_samples=100)
assert n_dist.distribution.shape == (4, 100)
assert n_dist.shape == (4, )
assert n_dist.unit == u.kpc
assert np.all(n_dist.pdf_std > 100*u.pc)
n_dist2 = ds.normal(centerq, std=[0.2, 1.5, 4, 1]*u.pc, n_samples=20000)
assert n_dist2.distribution.shape == (4, 20000)
assert n_dist2.shape == (4, )
assert n_dist2.unit == u.kpc
assert np.all(n_dist2.pdf_std < 100*u.pc)
def test_helper_poisson_samples():
centerqcounts = [1, 5, 30, 400] * u.count
with NumpyRNGContext(12345):
p_dist = ds.poisson(centerqcounts, n_samples=100)
assert p_dist.shape == (4,)
assert p_dist.distribution.shape == (4, 100)
assert p_dist.unit == u.count
p_min = np.min(p_dist)
assert isinstance(p_min, Distribution)
assert p_min.shape == ()
assert np.all(p_min >= 0)
assert np.all(np.abs(p_dist.pdf_mean - centerqcounts) < centerqcounts)
def test_helper_uniform_samples():
udist = ds.uniform(lower=[1, 2]*u.kpc, upper=[3, 4]*u.kpc, n_samples=1000)
assert udist.shape == (2, )
assert udist.distribution.shape == (2, 1000)
assert np.all(np.min(udist.distribution, axis=-1) > [1, 2]*u.kpc)
assert np.all(np.max(udist.distribution, axis=-1) < [3, 4]*u.kpc)
# try the alternative creator
udist = ds.uniform(center=[1, 3, 2] * u.pc, width=[5, 4, 3] * u.pc, n_samples=1000)
assert udist.shape == (3, )
assert udist.distribution.shape == (3, 1000)
assert np.all(np.min(udist.distribution, axis=-1) > [-1.5, 1, 0.5]*u.pc)
assert np.all(np.max(udist.distribution, axis=-1) < [3.5, 5, 3.5]*u.pc)
def test_helper_normal_exact():
pytest.skip('distribution stretch goal not yet implemented')
centerq = [1, 5, 30, 400] * u.kpc
ds.normal(centerq, std=[0.2, 1.5, 4, 1]*u.kpc)
ds.normal(centerq, var=[0.04, 2.25, 16, 1]*u.kpc**2)
ds.normal(centerq, ivar=[25, 0.44444444, 0.625, 1]*u.kpc**-2)
def test_helper_poisson_exact():
pytest.skip('distribution stretch goal not yet implemented')
centerq = [1, 5, 30, 400] * u.one
ds.poisson(centerq, n_samples=1000)
with pytest.raises(u.UnitsError) as exc:
centerq = [1, 5, 30, 400] * u.kpc
ds.poisson(centerq, n_samples=1000)
assert exc.value.args[0] == ("Poisson distribution can only be computed "
"for dimensionless quantities")
def test_reprs():
darr = np.arange(30).reshape(3, 10)
distr = Distribution(darr * u.kpc)
assert 'n_samples=10' in repr(distr)
assert 'n_samples=10' in str(distr)
assert r'n_{\rm samp}=10' in distr._repr_latex_()
@pytest.mark.parametrize("func, kws", [
(ds.normal, {'center': 0, 'std': 2}),
(ds.uniform, {'lower': 0, 'upper': 2}),
(ds.poisson, {'center': 2}),
(ds.normal, {'center': 0*u.count, 'std': 2*u.count}),
(ds.uniform, {'lower': 0*u.count, 'upper': 2*u.count}),
(ds.poisson, {'center': 2*u.count})
])
def test_wrong_kw_fails(func, kws):
with pytest.raises(Exception):
kw_temp = kws.copy()
kw_temp['n_sample'] = 100 # note the missing "s"
assert func(**kw_temp).n_samples == 100
kw_temp = kws.copy()
kw_temp['n_samples'] = 100
assert func(**kw_temp).n_samples == 100
def test_index_assignment_quantity():
arr = np.random.randn(2, 1000)
distr = Distribution(arr*u.kpc)
d1q, d2q = distr
assert isinstance(d1q, Distribution)
assert isinstance(d2q, Distribution)
ndistr = ds.normal(center=[1, 2]*u.kpc, std=[3, 4]*u.kpc, n_samples=1000)
n1, n2 = ndistr
assert isinstance(n1, ds.Distribution)
assert isinstance(n2, ds.Distribution)
def test_index_assignment_array():
arr = np.random.randn(2, 1000)
distr = Distribution(arr)
d1a, d2a = distr
assert isinstance(d1a, Distribution)
assert isinstance(d2a, Distribution)
ndistr = ds.normal(center=[1, 2], std=[3, 4], n_samples=1000)
n1, n2 = ndistr
assert isinstance(n1, ds.Distribution)
assert isinstance(n2, ds.Distribution)
def test_histogram():
arr = np.random.randn(2, 3, 1000)
distr = Distribution(arr)
hist, bins = distr.pdf_histogram(bins=10)
assert hist.shape == (2, 3, 10)
assert bins.shape == (2, 3, 11)
def test_array_repr_latex():
# as of this writing ndarray does not have a _repr_latex_, and this test
    # ensures distributions account for that. However, if in the future ndarray
# gets a _repr_latex_, we can skip this.
arr = np.random.randn(4, 1000)
if hasattr(arr, '_repr_latex_'):
pytest.skip('in this version of numpy, ndarray has a _repr_latex_')
distr = Distribution(arr)
assert distr._repr_latex_() is None
|
5039f90ce3e00c0361e6bc217036839c001c2f2aff0351f8b5bc5261f1305926 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test sky projections defined in WCS Paper II"""
import os
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from astropy.modeling import projections
from astropy.modeling.parameters import InputParameterError
from astropy import units as u
from astropy.io import fits
from astropy import wcs
from astropy.utils.data import get_pkg_data_filename
from astropy.tests.helper import assert_quantity_allclose
def test_Projection_properties():
projection = projections.Sky2Pix_PlateCarree()
assert projection.n_inputs == 2
assert projection.n_outputs == 2
PIX_COORDINATES = [-10, 30]
MAPS_DIR = os.path.join(os.pardir, os.pardir, "wcs", "tests", "data", "maps")
pars = [(x,) for x in projections.projcodes]
# There is no groundtruth file for the XPH projection available here:
# http://www.atnf.csiro.au/people/mcalabre/WCS/example_data.html
pars.remove(('XPH',))
@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, "1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model(*params)
x, y = tinv(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, "1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
model = getattr(projections, 'Pix2Sky_' + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix_unit(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, "1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model(*params)
x, y = tinv(wcslibout['phi'] * u.deg, wcslibout['theta'] * u.deg)
assert_quantity_allclose(x, wcs_pix[:, 0] * u.deg)
assert_quantity_allclose(y, wcs_pix[:, 1] * u.deg)
@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky_unit(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, "1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
model = getattr(projections, 'Pix2Sky_' + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES * u.deg)
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.rad))
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.arcmin))
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
@pytest.mark.parametrize(('code',), pars)
def test_projection_default(code):
"""Check astropy model eval with default parameters"""
# Just makes sure that the default parameter values are reasonable
# and accepted by wcslib.
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model()
x, y = tinv(45, 45)
model = getattr(projections, 'Pix2Sky_' + code)
tinv = model()
x, y = tinv(0, 0)
class TestZenithalPerspective:
"""Test Zenithal Perspective projection"""
def setup_class(self):
ID = 'AZP'
wcs_map = os.path.join(MAPS_DIR, "1904-66_{0}.hdr".format(ID))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0., 0.])
self.wazp.wcs.crval = np.array([0., 0.])
self.wazp.wcs.cdelt = np.array([1., 1.])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_ZenithalPerspective(*self.pv_kw)
def test_AZP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
phi, theta = self.azp(-10, 30)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
def test_AZP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
class TestCylindricalPerspective:
"""Test cylindrical perspective projection"""
def setup_class(self):
ID = "CYP"
wcs_map = os.path.join(MAPS_DIR, "1904-66_{0}.hdr".format(ID))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0., 0.])
self.wazp.wcs.crval = np.array([0., 0.])
self.wazp.wcs.cdelt = np.array([1., 1.])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_CylindricalPerspective(*self.pv_kw)
def test_CYP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
phi, theta = self.azp(-10, 30)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
def test_CYP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
def test_AffineTransformation2D():
# Simple test with a scale and translation
model = projections.AffineTransformation2D(
matrix=[[2, 0], [0, 2]], translation=[1, 1])
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
new_rect = np.vstack(model(x, y)).T
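    # Each vertex maps as p -> 2 * p + (1, 1), e.g. (1, 3) -> (3, 7).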
assert np.all(new_rect == [[1, 1], [3, 1], [1, 7], [3, 7]])
def test_AffineTransformation2D_inverse():
# Test non-invertible model
model1 = projections.AffineTransformation2D(
matrix=[[1, 1], [1, 1]])
with pytest.raises(InputParameterError):
model1.inverse
model2 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]], translation=[9.1, 10.11])
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
x_new, y_new = model2.inverse(*model2(x, y))
assert_allclose([x, y], [x_new, y_new], atol=1e-10)
def test_c_projection_striding():
# This is just a simple test to make sure that the striding is
# handled correctly in the projection C extension
coords = np.arange(10).reshape((5, 2))
model = projections.Sky2Pix_ZenithalPerspective(2, 30)
phi, theta = model(coords[:, 0], coords[:, 1])
assert_almost_equal(
phi,
[0., 2.2790416, 4.4889294, 6.6250643, 8.68301])
assert_almost_equal(
theta,
[-76.4816918, -75.3594654, -74.1256332, -72.784558, -71.3406629])
def test_c_projections_shaped():
nx, ny = (5, 2)
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)
xv, yv = np.meshgrid(x, y)
model = projections.Pix2Sky_TAN()
phi, theta = model(xv, yv)
assert_allclose(
phi,
[[0., 90., 90., 90., 90.],
[180., 165.96375653, 153.43494882, 143.13010235, 135.]])
assert_allclose(
theta,
[[90., 89.75000159, 89.50001269, 89.25004283, 89.00010152],
[89.00010152, 88.96933478, 88.88210788, 88.75019826, 88.58607353]])
def test_affine_with_quantities():
x = 1
y = 2
xdeg = (x * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
ydeg = (y * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
xpix = x * u.pix
ypix = y * u.pix
# test affine with matrix only
qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg)
with pytest.raises(ValueError):
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(2.5 * u.deg / u.pix),
'y': u.pixel_scale(2.5 * u.deg / u.pix)})
# test affine with matrix and translation
qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg,
translation=[1, 2] * u.deg)
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(2.5 * u.deg / u.pix),
'y': u.pixel_scale(2.5 * u.deg / u.pix)})
aff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]], translation=[1, 2])
x1, y1 = aff(xdeg.value, ydeg.value)
assert_quantity_allclose(qx1, x1 * u.deg)
assert_quantity_allclose(qy1, y1 * u.deg)
# test the case of WCS PC and CDELT transformations
pc = np.array([[0.86585778922708, 0.50029020461607],
[-0.50029020461607, 0.86585778922708]])
cdelt = np.array([[1, 3.0683055555556E-05], [3.0966944444444E-05, 1]])
matrix = cdelt * pc
qaff = projections.AffineTransformation2D(matrix=matrix * u.deg,
translation=[0, 0] * u.deg)
inv_matrix = np.linalg.inv(matrix)
inv_qaff = projections.AffineTransformation2D(matrix=inv_matrix * u.pix,
translation=[0, 0] * u.pix)
qaff.inverse = inv_qaff
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(1 * u.deg / u.pix),
'y': u.pixel_scale(1 * u.deg / u.pix)})
x1, y1 = qaff.inverse(qx1, qy1, equivalencies={
'x': u.pixel_scale(1 * u.deg / u.pix),
'y': u.pixel_scale(1 * u.deg / u.pix)})
assert_quantity_allclose(x1, xpix)
assert_quantity_allclose(y1, ypix)
|
e1ad408727495c6e2a700484f46b9ec5972d02a1c1e7b8a9c34d22930dae2b5c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides functions to help with testing against iraf tasks
"""
from astropy.logger import log
import numpy as np
iraf_models_map = {1.: 'Chebyshev',
2.: 'Legendre',
3.: 'Spline3',
4.: 'Spline1'}
def get_records(fname):
"""
Read the records of an IRAF database file into a python list
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
A list of records
"""
f = open(fname)
dtb = f.read()
f.close()
recs = dtb.split('begin')[1:]
records = [Record(r) for r in recs]
return records
def get_database_string(fname):
"""
Read an IRAF database file
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
the database file as a string
"""
f = open(fname)
dtb = f.read()
f.close()
return dtb
class Record:
"""
A base class for all records - represents an IRAF database record
Attributes
----------
recstr: string
the record as a string
fields: dict
the fields in the record
taskname: string
the name of the task which created the database file
"""
def __init__(self, recstr):
self.recstr = recstr
self.fields = self.get_fields()
self.taskname = self.get_task_name()
def aslist(self):
reclist = self.recstr.split('\n')
reclist = [l.strip() for l in reclist]
[reclist.remove(l) for l in reclist if len(l) == 0]
return reclist
def get_fields(self):
# read record fields as an array
fields = {}
flist = self.aslist()
numfields = len(flist)
for i in range(numfields):
line = flist[i]
if line and line[0].isalpha():
field = line.split()
if i + 1 < numfields:
if not flist[i + 1][0].isalpha():
fields[field[0]] = self.read_array_field(
flist[i:i + int(field[1]) + 1])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
continue
return fields
def get_task_name(self):
try:
return self.fields['task']
except KeyError:
return None
    def read_array_field(self, fieldlist):
        # Turn an iraf record array field into a numpy array
        fieldline = [l.split() for l in fieldlist[1:]]
        # take only the first 3 columns
        # identify writes also strings at the end of some field lines
        xyz = [l[:3] for l in fieldline]
        try:
            farr = np.array(xyz)
        except Exception:
            log.debug("Could not read array field {}".format(fieldlist[0].split()[0]))
            raise
        return farr.astype(np.float64)
class IdentifyRecord(Record):
"""
Represents a database record for the onedspec.identify task
Attributes
----------
x: array
the X values of the identified features
this represents values on axis1 (image rows)
y: int
the Y values of the identified features
(image columns)
z: array
the values which X maps into
modelname: string
the function used to fit the data
nterms: int
degree of the polynomial which was fit to the data
in IRAF this is the number of coefficients, not the order
mrange: list
the range of the data
coeff: array
function (modelname) coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._flatcoeff = self.fields['coefficients'].flatten()
self.x = self.fields['features'][:, 0]
self.y = self.get_ydata()
self.z = self.fields['features'][:, 1]
self.modelname = self.get_model_name()
self.nterms = self.get_nterms()
self.mrange = self.get_range()
self.coeff = self.get_coeff()
def get_model_name(self):
return iraf_models_map[self._flatcoeff[0]]
def get_nterms(self):
return self._flatcoeff[1]
def get_range(self):
low = self._flatcoeff[2]
high = self._flatcoeff[3]
return [low, high]
def get_coeff(self):
return self._flatcoeff[4:]
def get_ydata(self):
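        # The 'image' field holds a section string such as 'name[2048,20]'
        # or 'name[20]' (illustrative examples): the Y value is the part
        # after the comma, or the whole section when there is no comma.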
image = self.fields['image']
left = image.find('[') + 1
right = image.find(']')
section = image[left:right]
if ',' in section:
yind = image.find(',') + 1
return int(image[yind:-1])
else:
return int(section)
class FitcoordsRecord(Record):
"""
    Represents a database record for the longslit.fitcoords task
Attributes
----------
modelname: string
the function used to fit the data
xorder: int
number of terms in x
yorder: int
number of terms in y
xbounds: list
data range in x
ybounds: list
data range in y
coeff: array
function coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._surface = self.fields['surface'].flatten()
self.modelname = iraf_models_map[self._surface[0]]
self.xorder = self._surface[1]
self.yorder = self._surface[2]
self.xbounds = [self._surface[4], self._surface[5]]
self.ybounds = [self._surface[6], self._surface[7]]
self.coeff = self.get_coeff()
def get_coeff(self):
return self._surface[8:]
class IDB:
"""
Base class for an IRAF identify database
Attributes
----------
records: list
a list of all `IdentifyRecord` in the database
numrecords: int
number of records
"""
def __init__(self, dtbstr):
self.records = [IdentifyRecord(rstr) for rstr in self.aslist(dtbstr)]
self.numrecords = len(self.records)
def aslist(self, dtb):
# return a list of records
# if the first one is a comment remove it from the list
rl = dtb.split('begin')
try:
rl0 = rl[0].split('\n')
except Exception:
return rl
if len(rl0) == 2 and rl0[0].startswith('#') and not rl0[1].strip():
return rl[1:]
else:
return rl
class ReidentifyRecord(IDB):
"""
Represents a database record for the onedspec.reidentify task
"""
def __init__(self, databasestr):
super().__init__(databasestr)
self.x = np.array([r.x for r in self.records])
self.y = self.get_ydata()
self.z = np.array([r.z for r in self.records])
def get_ydata(self):
y = np.ones(self.x.shape)
y = y * np.array([r.y for r in self.records])[:, np.newaxis]
return y
|
be0d5fc5c7209932d4901314b7fc6be695ab30b70181a15740714c8c7e398015 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for blackbody model and functions."""
import pytest
import numpy as np
from astropy.modeling.blackbody import BlackBody1D, blackbody_nu, blackbody_lambda, FNU
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.tests.helper import assert_quantity_allclose, catch_warnings
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
try:
from scipy import optimize, integrate # noqa
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
__doctest_skip__ = ['*']
class TestBlackbody1D:
# Make sure the temperature equivalency automatically applies by trying
# to pass temperatures in celsius
@pytest.mark.parametrize('temperature', (3000 * u.K, 2726.85 * u.deg_C))
def test_evaluate(self, temperature):
bolometric_flux = 1000 * u.L_sun / (4 * np.pi * (1.5 * u.pc) ** 2)
b = BlackBody1D(temperature=temperature,
bolometric_flux=bolometric_flux)
assert_quantity_allclose(b(1.4 * u.micron), 4734464.498937388 * u.Jy)
assert_quantity_allclose(b(214.13747 * u.THz), 4734464.498937388 * u.Jy)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit(self):
fitter = LevMarLSQFitter()
b = BlackBody1D(3000 * u.K)
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy
b_fit = fitter(b, wav, fnu)
assert_quantity_allclose(b_fit.temperature, 2840.744774408546 * u.K)
assert_quantity_allclose(b_fit.bolometric_flux, 6.821837296857152e-08 * u.erg / u.cm**2 / u.s)
@pytest.mark.skipif('not HAS_SCIPY')
def test_blackbody_scipy():
"""Test Planck function.
.. note:: Needs ``scipy`` to work.
"""
flux_unit = u.Watt / (u.m ** 2 * u.um)
wave = np.logspace(0, 8, 100000) * u.AA
temp = 100. * u.K
with np.errstate(all='ignore'):
bb_nu = blackbody_nu(wave, temp) * u.sr
flux = bb_nu.to(flux_unit, u.spectral_density(wave)) / u.sr
lum = wave.to(u.um)
intflux = integrate.trapz(flux.value, x=lum.value)
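    # Integrating the Planck specific intensity over all wavelengths gives
    # the Stefan-Boltzmann law per steradian: sigma_sb * T**4 / pi.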
ans = const.sigma_sb * temp ** 4 / np.pi
np.testing.assert_allclose(intflux, ans.value, rtol=0.01) # 1% accuracy
def test_blackbody_overflow():
"""Test Planck function with overflow."""
photlam = u.photon / (u.cm**2 * u.s * u.AA)
wave = [0, 1000.0, 100000.0, 1e55] # Angstrom
temp = 10000.0 # Kelvin
with np.errstate(all='ignore'):
bb_lam = blackbody_lambda(wave, temp) * u.sr
flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr
# First element is NaN, last element is very small, others normal
assert np.isnan(flux[0])
assert np.log10(flux[-1].value) < -134
np.testing.assert_allclose(
flux.value[1:-1], [3.38131732e+16, 3.87451317e+15],
rtol=1e-3) # 0.1% accuracy in PHOTLAM/sr
with np.errstate(all='ignore'):
flux = blackbody_lambda(1, 1e4)
assert flux.value == 0
def test_blackbody_synphot():
"""Test that it is consistent with IRAF SYNPHOT BBFUNC."""
# Solid angle of solar radius at 1 kpc
fac = np.pi * (const.R_sun / const.kpc) ** 2 * u.sr
with np.errstate(all='ignore'):
flux = blackbody_nu([100, 1, 1000, 1e4, 1e5] * u.AA, 5000) * fac
assert flux.unit == FNU
# Special check for overflow value (SYNPHOT gives 0)
assert np.log10(flux[0].value) < -143
np.testing.assert_allclose(
flux.value[1:], [0, 2.01950807e-34, 3.78584515e-26, 1.90431881e-27],
rtol=0.01) # 1% accuracy
def test_blackbody_exceptions_and_warnings():
"""Test exceptions."""
# Negative temperature
with pytest.raises(ValueError) as exc:
blackbody_nu(1000 * u.AA, -100)
assert exc.value.args[0] == 'Temperature should be positive: -100.0 K'
# Zero wavelength given for conversion to Hz
with catch_warnings(AstropyUserWarning) as w:
blackbody_nu(0 * u.AA, 5000)
assert len(w) == 1
assert 'invalid' in w[0].message.args[0]
# Negative wavelength given for conversion to Hz
with catch_warnings(AstropyUserWarning) as w:
blackbody_nu(-1. * u.AA, 5000)
assert len(w) == 1
assert 'invalid' in w[0].message.args[0]
def test_blackbody_array_temperature():
"""Regression test to make sure that the temperature can be an array."""
flux = blackbody_nu(1.2 * u.mm, [100, 200, 300] * u.K)
np.testing.assert_allclose(
flux.value, [1.804908e-12, 3.721328e-12, 5.638513e-12], rtol=1e-5)
flux = blackbody_nu([2, 4, 6] * u.mm, [100, 200, 300] * u.K)
np.testing.assert_allclose(
flux.value, [6.657915e-13, 3.420677e-13, 2.291897e-13], rtol=1e-5)
flux = blackbody_nu(np.ones((3, 4)) * u.mm, np.ones(4) * u.K)
assert flux.shape == (3, 4)
|
ca251c3c5844911398e043d86415ae8c59b6ab3353204bc5acf3e41fc2ca1403 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Here are all the test parameters and values for each
`~astropy.modeling.FittableModel` defined. There is a dictionary for 1D and a
dictionary for 2D models.
Explanation of keywords of the dictionaries:
"parameters" : list or dict
Model parameters, the model is tested with. Make sure you keep the right
order. For polynomials you can also use a dict to specify the
coefficients. See examples below.
"x_values" : list
x values where the model is evaluated.
"y_values" : list
    Reference y values at the positions given in x_values.
"z_values" : list
    Reference z values at the positions given in x_values and y_values.
    (2D model option)
"x_lim" : list
    x test range for the model fitter. Depending on the model this can differ;
    e.g. the PowerLaw model should be tested over a few magnitudes.
"y_lim" : list
    y test range for the model fitter. Depending on the model this can differ;
    e.g. the PowerLaw model should be tested over a few magnitudes. (2D model
    option)
"log_fit" : bool
    PowerLaw models should be tested over a few magnitudes, so log_fit should
    be set to True.
"requires_scipy" : bool
If a model requires scipy (Bessel functions etc.) set this flag.
"integral" : float
Approximate value of the integral in the range x_lim (and y_lim).
"deriv_parameters" : list
If given the test of the derivative will use these parameters to create a
model (optional)
"deriv_initial" : list
If given the test of the derivative will use these parameters as initial
values for the fit (optional)
"""
from astropy.modeling.functional_models import (
Gaussian1D, Sine1D, Box1D, Linear1D, Lorentz1D,
MexicanHat1D, Trapezoid1D, Const1D, Moffat1D,
Gaussian2D, Const2D, Box2D, MexicanHat2D,
TrapezoidDisk2D, AiryDisk2D, Moffat2D, Disk2D,
Ring2D, Sersic1D, Sersic2D, Voigt1D, Planar2D)
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (
PowerLaw1D, BrokenPowerLaw1D, SmoothlyBrokenPowerLaw1D, ExponentialCutoffPowerLaw1D,
LogParabola1D)
import numpy as np
# 1D Models
models_1D = {
Gaussian1D: {
'parameters': [1, 0, 1],
'x_values': [0, np.sqrt(2), -np.sqrt(2)],
'y_values': [1.0, 0.367879, 0.367879],
'x_lim': [-10, 10],
'integral': np.sqrt(2 * np.pi)
},
Sine1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 2.5],
'y_values': [0, 1],
'x_lim': [-10, 10],
'integral': 0
},
Box1D: {
'parameters': [1, 0, 10],
'x_values': [-5, 5, 0, -10, 10],
'y_values': [1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'integral': 10
},
Linear1D: {
'parameters': [1, 0],
'x_values': [0, np.pi, 42, -1],
'y_values': [0, np.pi, 42, -1],
'x_lim': [-10, 10],
'integral': 0
},
Lorentz1D: {
'parameters': [1, 0, 1],
'x_values': [0, -1, 1, 0.5, -0.5],
'y_values': [1., 0.2, 0.2, 0.5, 0.5],
'x_lim': [-10, 10],
'integral': 1
},
MexicanHat1D: {
'parameters': [1, 0, 1],
'x_values': [0, 1, -1, 3, -3],
'y_values': [1.0, 0.0, 0.0, -0.088872, -0.088872],
'x_lim': [-20, 20],
'integral': 0
},
Trapezoid1D: {
'parameters': [1, 0, 2, 1],
'x_values': [0, 1, -1, 1.5, -1.5, 2, 2],
'y_values': [1, 1, 1, 0.5, 0.5, 0, 0],
'x_lim': [-10, 10],
'integral': 3
},
Const1D: {
'parameters': [1],
'x_values': [-1, 1, np.pi, -42., 0],
'y_values': [1, 1, 1, 1, 1],
'x_lim': [-10, 10],
'integral': 20
},
Moffat1D: {
'parameters': [1, 0, 1, 2],
'x_values': [0, 1, -1, 3, -3],
'y_values': [1.0, 0.25, 0.25, 0.01, 0.01],
'x_lim': [-10, 10],
'integral': 1,
'deriv_parameters': [23.4, 1.2, 2.1, 2.3],
'deriv_initial': [10, 1, 1, 1]
},
PowerLaw1D: {
'parameters': [1, 1, 2],
'constraints': {'fixed': {'x_0': True}},
'x_values': [1, 10, 100],
'y_values': [1.0, 0.01, 0.0001],
'x_lim': [1, 10],
'log_fit': True,
'integral': 0.99
},
BrokenPowerLaw1D: {
'parameters': [1, 1, 2, 3],
'constraints': {'fixed': {'x_break': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [1e2, 1.0, 1e-3, 1e-6],
'x_lim': [0.1, 100],
'log_fit': True
},
SmoothlyBrokenPowerLaw1D: {
'parameters': [1, 1, -2, 2, 0.5],
'constraints': {'fixed': {'x_break': True, 'delta': True}},
'x_values': [0.01, 1, 100],
'y_values': [3.99920012e-04, 1.0, 3.99920012e-04],
'x_lim': [0.01, 100],
'log_fit': True
},
ExponentialCutoffPowerLaw1D: {
'parameters': [1, 1, 2, 3],
'constraints': {'fixed': {'x_0': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [9.67216100e+01, 7.16531311e-01, 3.56739933e-04,
3.33823780e-19],
'x_lim': [0.01, 100],
'log_fit': True
},
LogParabola1D: {
'parameters': [1, 2, 3, 0.1],
'constraints': {'fixed': {'x_0': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [3.26089063e+03, 7.62472488e+00, 6.17440488e-03,
1.73160572e-06],
'x_lim': [0.1, 100],
'log_fit': True
},
Polynomial1D: {
'parameters': {'degree': 2, 'c0': 1., 'c1': 1., 'c2': 1.},
'x_values': [1, 10, 100],
'y_values': [3, 111, 10101],
'x_lim': [-3, 3]
},
Sersic1D: {
'parameters': [1, 20, 4],
'x_values': [0.1, 1, 10, 100],
'y_values': [2.78629391e+02, 5.69791430e+01, 3.38788244e+00,
2.23941982e-02],
'requires_scipy': True,
'x_lim': [0, 10],
'log_fit': True
},
Voigt1D: {
'parameters': [0, 1, 0.5, 0.9],
'x_values': [0, 2, 4, 8, 10],
'y_values': [0.520935, 0.017205, 0.003998, 0.000983, 0.000628],
'x_lim': [-3, 3]
}
}
# 2D Models
models_2D = {
Gaussian2D: {
'parameters': [1, 0, 0, 1, 1],
'constraints': {'fixed': {'theta': True}},
'x_values': [0, np.sqrt(2), -np.sqrt(2)],
'y_values': [0, np.sqrt(2), -np.sqrt(2)],
'z_values': [1, 1. / np.exp(1) ** 2, 1. / np.exp(1) ** 2],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 2 * np.pi,
'deriv_parameters': [137., 5.1, 5.4, 1.5, 2., np.pi/4],
'deriv_initial': [10, 5, 5, 4, 4, .5]
},
Const2D: {
'parameters': [1],
'x_values': [-1, 1, np.pi, -42., 0],
'y_values': [0, 1, 42, np.pi, -1],
'z_values': [1, 1, 1, 1, 1],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 400
},
Box2D: {
'parameters': [1, 0, 0, 10, 10],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [1, 1, 1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 100
},
MexicanHat2D: {
'parameters': [1, 0, 0, 1],
'x_values': [0, 0, 0, 0, 0, 1, -1, 3, -3],
'y_values': [0, 1, -1, 3, -3, 0, 0, 0, 0],
'z_values': [1.0, 0.303265, 0.303265, -0.038881, -0.038881,
0.303265, 0.303265, -0.038881, -0.038881],
'x_lim': [-10, 11],
'y_lim': [-10, 11],
'integral': 0
},
TrapezoidDisk2D: {
'parameters': [1, 0, 0, 1, 1],
'x_values': [0, 0.5, 0, 1.5],
'y_values': [0, 0.5, 1.5, 0],
'z_values': [1, 1, 0.5, 0.5],
'x_lim': [-3, 3],
'y_lim': [-3, 3]
},
AiryDisk2D: {
'parameters': [7, 0, 0, 10],
'x_values': [0, 1, -1, -0.5, -0.5],
'y_values': [0, -1, 0.5, 0.5, -0.5],
'z_values': [7., 6.50158267, 6.68490643, 6.87251093, 6.87251093],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'requires_scipy': True
},
Moffat2D: {
'parameters': [1, 0, 0, 1, 2],
'x_values': [0, 1, -1, 3, -3],
'y_values': [0, -1, 3, 1, -3],
'z_values': [1.0, 0.111111, 0.008264, 0.008264, 0.00277],
'x_lim': [-3, 3],
'y_lim': [-3, 3]
},
Polynomial2D: {
'parameters': {'degree': 1, 'c0_0': 1., 'c1_0': 1., 'c0_1': 1.},
'x_values': [1, 2, 3],
'y_values': [1, 3, 2],
'z_values': [3, 6, 6],
'x_lim': [1, 100],
'y_lim': [1, 100]
},
Disk2D: {
'parameters': [1, 0, 0, 5],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [0, 0, 1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': np.pi * 5 ** 2
},
Ring2D: {
'parameters': [1, 0, 0, 5, 5],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [1, 1, 1, 1, 0, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': np.pi * (10 ** 2 - 5 ** 2)
},
Sersic2D: {
'parameters': [1, 25, 4, 50, 50, 0.5, -1],
'x_values': [0.0, 1, 10, 100],
'y_values': [1, 100, 0.0, 10],
'z_values': [1.686398e-02, 9.095221e-02, 2.341879e-02, 9.419231e-02],
'requires_scipy': True,
'x_lim': [1, 1e10],
'y_lim': [1, 1e10]
},
Planar2D: {
'parameters': [1, 1, 0],
'x_values': [0, np.pi, 42, -1],
'y_values': [np.pi, 0, -1, 42],
'z_values': [np.pi, np.pi, 41, 41],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 0
}
}
|
55c52f98ab6fe185e95b28740823c4c63e6f5b3849850efb7c619e1b84bbe480 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
import os.path
import pytest
import numpy as np
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal
from unittest import mock
from . import irafutil
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import *
from astropy.utils import NumpyRNGContext
from astropy.utils.data import get_pkg_data_filename
from .utils import ignore_non_integer_warning
from astropy.stats import sigma_clip
from astropy.utils.exceptions import AstropyUserWarning
from astropy.modeling.fitting import populate_entry_points
import warnings
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
from pkg_resources import EntryPoint
HAS_PKG = True
except ImportError:
HAS_PKG = False
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomail fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
self.z = poly2(self.x, self.y)
self.fitter = LinearLSQFitter()
def test_poly2D_fitting(self):
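        # Polynomial2D.fit_deriv evaluates the basis monomials, i.e. the
        # design matrix (one column per coefficient), so a direct lstsq
        # solve yields reference parameters for the fitter to match.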
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = self.fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
new_model = self.fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_polynomial2D_nonlinear_fitting(self):
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
nlfitter = LevMarLSQFitter()
new_model = nlfitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
Fit z using a ICheb2D model
Evaluate the ICheb2D polynomial and compare with the initial z
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
128.])
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
model = nlfitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting_with_weights(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
weights = np.ones_like(self.y)
model = nlfitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
"""
Tests the joint fitting routine using 2 gaussian models
"""
def setup_class(self):
"""
Create 2 gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
self.jf = JointFitter([self.g1, self.g2],
{self.g1: ['amplitude'],
self.g2: ['amplitude']}, [9.8])
self.x = np.arange(10, 20, .1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
        Tests the fitting routine against an equivalent scipy.optimize.leastsq
        fit and compares the fitted parameters.
"""
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
model(p[0], p[3:], x2) - y2])
coeff, _ = optimize.leastsq(errfunc, p,
args=(self.x, self.ny1, self.x, self.ny2))
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
with pytest.raises(ValueError) as excinfo:
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model_comp, x, y)
assert "Model must be simple, not compound" in str(excinfo.value)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join('data',
'idcompspec.fits'))
with open(test_file) as f:
lines = f.read()
reclist = lines.split('begin')
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields['order'])
initial_model = models.Chebyshev1D(order - 1,
domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs),
rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5*x*x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5*x*x, -2*x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
y[0, 7] = 100. # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
mask=np.zeros_like([x, x]))
z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4. * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
def test_estimated_vs_analytic_deriv(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_estimated_vs_analytic_deriv_with_weights(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`.
"""
weights = 1.0 / (self.ydata / 10.)
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_with_optimize(self):
"""
Tests results from `LevMarLSQFitter` against `scipy.optimize.leastsq`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(errfunc, self.initial_values,
args=(self.xdata, self.ydata))
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
def test_with_weights(self):
"""
Tests results from `LevMarLSQFitter` with weights.
"""
# part 1: weights are equal to 1
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=np.ones_like(self.xdata))
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.
mask = weights >= 1.
model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=weights)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.parametrize('fitter_class', fitters)
def test_fitter_against_LevMar(self, fitter_class):
"""Tests results from non-linear fitters against `LevMarLSQFitter`."""
levmar = LevMarLSQFitter()
fitter = fitter_class()
with ignore_non_integer_warning():
new_model = fitter(self.gauss, self.xdata, self.ydata)
model = levmar(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters,
rtol=10 ** (-4))
def test_LSQ_SLSQP_with_constraints(self):
"""
Runs `LevMarLSQFitter` and `SLSQPLSQFitter` on a model with
constraints.
"""
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fitter = LevMarLSQFitter()
fslsqp = SLSQPLSQFitter()
with ignore_non_integer_warning():
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters,
rtol=10 ** (-4))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x ** 2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0., 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
def test_param_cov(self):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x*a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
        X = np.vstack([x, np.ones(len(x))]).T
        beta = np.linalg.inv(X.T @ X) @ X.T @ y
        s2 = np.sum((y - X @ beta)**2) / (len(y) - len(beta))
        olscov = np.linalg.inv(X.T @ X) * s2
        # now do the non-linear least squares fit
        mod = models.Linear1D(a, b)
        fitter = LevMarLSQFitter()
        fmod = fitter(mod, x, y)
        assert_allclose(fmod.parameters, beta)
        assert_allclose(olscov, fitter.fit_info['param_cov'])
@pytest.mark.skipif('not HAS_PKG')
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def setup_class(self):
self.exception_not_thrown = Exception("The test should not have gotten here. There was no exception thrown")
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
        # This should fail as it raises an ImportError
raise ImportError
def returnbadfunc(self):
def badfunc():
# This should import but it should fail type check
pass
return badfunc
def returnbadclass(self):
        # This should import but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
populate_entry_points([mock_entry_importerror])
except AstropyUserWarning as w:
if "ImportError" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_func(self):
"""This returns a function which fails the type check"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
populate_entry_points([mock_entry_badfunc])
except AstropyUserWarning as w:
if "Class" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter """
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
populate_entry_points([mock_entry_badclass])
except AstropyUserWarning as w:
if 'modeling.Fitter' in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5., 5., 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
self.y = func(self.model_params, self.x)
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
self.y += (np.random.normal(0., 0.2, self.x.shape) +
c*np.random.normal(3.0, 5.0, self.x.shape))
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, atol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
0.5*(pos[1] - p[1])**2 / p[3]**2)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Compute the centroid of the data as the initial guess for the
        center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
self.z += (np.random.normal(0., 0.2, self.z.shape) +
c*np.random.normal(self.z, 2.0, self.z.shape))
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
y_mean=guess[2], x_stddev=0.75,
y_stddev=1.25)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip, niter=3,
sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
x = np.arange(10)
y = np.array([2.5*x - 4, 2*x*x + x + 10])
y[1,5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)
assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)
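    # model_set_axis=2 stacks the two models' outputs along the last axis,
    # matching the (5, 5, 2) shape produced by rollaxis and the (1, 1, 2)
    # coefficient shapes asserted below.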
z[3,3:5,0] = 100. # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020 """
def setup_class(self):
# values of x,y not important as we fit y(x,y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0,0] = 1000.0 # outlier
self.z[0,1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x1d, self.z1d)
assert((~mask).sum() == self.z1d.size - 2)
assert(mask[0] and mask[1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2)) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
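        # LinearLSQFitter multiplies both sides of the linear system by the
        # weights, so each point effectively counts with weights**2; with the
        # outliers clipped, the 1/3 chessboard gives
        # (1**2 * (-1) + 3**2 * 1) / (1**2 + 3**2) = 0.8.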
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x, self.y, self.z)
assert((~mask).sum() == self.z.size - 2)
assert(mask[0,0] and mask[0,1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
def test_2d_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LevMarLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_with_weights():
"""Issue #5737 """
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LevMarLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_interface():
"""
Test that **kwargs work with all optimizers.
This is a basic smoke test.
"""
levmar = LevMarLSQFitter()
slsqp = SLSQPLSQFitter()
simplex = SimplexLSQFitter()
kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
simplex_kwargs = {'maxiter': 77, 'verblevel': 1, 'acc': 1e-6}
model = models.Gaussian1D(10, 4, .3)
x = np.arange(21)
y = model(x)
slsqp_model = slsqp(model, x, y, **kwargs)
simplex_model = simplex(model, x, y, **simplex_kwargs)
kwargs.pop('verblevel')
lm_model = levmar(model, x, y, **kwargs)
|
b080b797090c3a296540dadfe2a0ae42c8d1733a3e89f9aa4c3e2d5b7eb1272d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.modeling import models, InputParameterError
from astropy.coordinates import Angle
from astropy.modeling import fitting
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
def test_sigma_constant():
"""
Test that the GAUSSIAN_SIGMA_TO_FWHM constant matches the
gaussian_sigma_to_fwhm constant in astropy.stats. We define
it manually in astropy.modeling to avoid importing from
astropy.stats.
"""
from astropy.stats.funcs import gaussian_sigma_to_fwhm
from astropy.modeling.functional_models import GAUSSIAN_SIGMA_TO_FWHM
assert gaussian_sigma_to_fwhm == GAUSSIAN_SIGMA_TO_FWHM
def test_Trapezoid1D():
"""Regression test for https://github.com/astropy/astropy/issues/1721"""
model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)
xx = np.linspace(0, 4, 8)
yy = model(xx)
yy_ref = [0., 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.]
assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)
def test_Gaussian2D():
"""
Test rotated elliptical Gaussian2D model.
https://github.com/astropy/astropy/pull/2038
"""
model = models.Gaussian2D(4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3,
theta=np.pi/6.)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],
[3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],
[3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],
[3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],
[3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose([model.x_fwhm, model.y_fwhm],
[12.009582229657841, 7.7709061486021325])
def test_Gaussian2DCovariance():
"""
Test rotated elliptical Gaussian2D model when cov_matrix is input.
https://github.com/astropy/astropy/pull/2199
"""
cov_matrix = [[49., -16.], [-16., 9.]]
model = models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],
[8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],
[13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],
[16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],
[14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
def test_Gaussian2DRotation():
amplitude = 42
x_mean, y_mean = 0, 0
x_stddev, y_stddev = 2, 3
theta = Angle(10, 'deg')
pars = dict(amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev)
rotation = models.Rotation2D(angle=theta.degree)
point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev)
point2 = rotation(*point1)
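    # Rotating the coordinates and rotating the Gaussian by the same angle
    # are equivalent, so g1 at point1 must equal g2 at the rotated point2.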
g1 = models.Gaussian2D(theta=0, **pars)
g2 = models.Gaussian2D(theta=theta.radian, **pars)
value1 = g1(*point1)
value2 = g2(*point2)
assert_allclose(value1, value2)
def test_Gaussian2D_invalid_inputs():
x_stddev = 5.1
y_stddev = 3.3
theta = 10
cov_matrix = [[49., -16.], [-16., 9.]]
# first make sure the valid ones are OK
models.Gaussian2D()
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None)
models.Gaussian2D(cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(theta=0, cov_matrix=cov_matrix)
def test_moffat_fwhm():
ans = 34.641016151377542
kwargs = {'gamma': 10, 'alpha': 0.5}
m1 = models.Moffat1D(**kwargs)
m2 = models.Moffat2D(**kwargs)
assert_allclose([m1.fwhm, m2.fwhm], ans)
def test_RedshiftScaleFactor():
"""Like ``test_ScaleModel()``."""
# Scale by a scalar
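    # RedshiftScaleFactor multiplies its input by (1 + z), so z=0.4 maps
    # 1 -> 1.4; the inverse divides by (1 + z).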
m = models.RedshiftScaleFactor(0.4)
assert m(0) == 0
assert_array_equal(m([1, 2]), [1.4, 2.8])
assert_allclose(m.inverse(m([1, 2])), [1, 2])
# Scale by a list
m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)
assert_array_equal(m(0), 0)
assert_array_equal(m([1, 2], model_set_axis=False),
[[0.5, 1], [1, 2], [1.5, 3]])
assert_allclose(m.inverse(m([1, 2], model_set_axis=False)),
[[1, 2], [1, 2], [1, 2]])
def test_Ellipse2D():
"""Test Ellipse2D model."""
amplitude = 7.5
x0, y0 = 15, 15
theta = Angle(45, 'deg')
em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)
y, x = np.mgrid[0:30, 0:30]
e = em(x, y)
assert np.all(e[e > 0] == amplitude)
assert e[y0, x0] == amplitude
rotation = models.Rotation2D(angle=theta.degree)
point1 = [2, 0] # Rotation2D center is (0, 0)
point2 = rotation(*point1)
point1 = np.array(point1) + [x0, y0]
point2 = np.array(point2) + [x0, y0]
e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.)
e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)
assert e1(*point1) == e2(*point2)
def test_Ellipse2D_circular():
"""Test that circular Ellipse2D agrees with Disk2D [3736]."""
amplitude = 7.5
radius = 10
size = (radius * 2) + 1
y, x = np.mgrid[0:size, 0:size]
ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius,
theta=0)(x, y)
disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)
assert np.all(ellipse == disk)
def test_Scale_inverse():
m = models.Scale(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Multiply_inverse():
m = models.Multiply(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Shift_inverse():
m = models.Shift(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
@pytest.mark.skipif('not HAS_SCIPY')
def test_Shift_model_levmar_fit():
"""Test fitting Shift model with LevMarLSQFitter (issue #6103)."""
init_model = models.Shift()
x = np.arange(10)
y = x+0.1
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)
def test_Shift_model_set_linear_fit():
"""Test linear fitting of Shift model (issue #6103)."""
init_model = models.Shift(offset=[0, 0], n_models=2)
x = np.arange(10)
yy = np.array([x+0.1, x-0.2])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_set_linear_fit(Model):
"""Test linear fitting of Scale model (#6103)."""
init_model = Model(factor=[0, 0], n_models=2)
x = np.arange(-3, 7)
yy = np.array([1.15*x, 0.96*x])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)
# https://github.com/astropy/astropy/issues/6178
def test_Ring2D_rout():
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5)
assert m.width.value == 3
@pytest.mark.skipif("not HAS_SCIPY")
def test_Voigt1D():
voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
xarr = np.linspace(-5.0, 5.0, num=40)
yarr = voi(xarr)
voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
fitter = fitting.LevMarLSQFitter()
voi_fit = fitter(voi_init, xarr, yarr)
assert_allclose(voi_fit.param_sets, voi.param_sets)
@pytest.mark.skipif("not HAS_SCIPY")
def test_compound_models_with_class_variables():
models_2d = [models.AiryDisk2D, models.Sersic2D]
models_1d = [models.Sersic1D]
for model_2d in models_2d:
class CompoundModel2D(models.Const2D + model_2d):
pass
x, y = np.mgrid[:10, :10]
f = CompoundModel2D()(x, y)
assert f.shape == (10, 10)
for model_1d in models_1d:
class CompoundModel1D(models.Const1D + model_1d):
pass
x = np.arange(10)
f = CompoundModel1D()(x)
assert f.shape == (10,)
|
1c0b1fa30d091f9772ee2cda9690d0211b55f1599e334efee372d5c9ecaf5de4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import numpy as np
from astropy.modeling.utils import ExpressionTree as ET, ellipse_extent
from astropy.modeling.models import Ellipse2D
def test_traverse_postorder_duplicate_subtrees():
"""
Regression test for a bug in `ExpressionTree.traverse_postorder`
    given an expression like ``(1 + 2) + (1 + 2)`` where the two proper
subtrees are actually the same object.
"""
subtree = ET('+', ET(1), ET(2))
tree = ET('+', subtree, subtree)
traversal = [n.value for n in tree.traverse_postorder()]
assert traversal == [1, 2, '+', 1, 2, '+', '+']
# TODO: It might prove useful to implement a simple expression parser to build
# trees; this would be easy and might find use elsewhere
def test_tree_evaluate_subexpression():
"""Test evaluating a subexpression from an expression tree."""
operators = {'+': operator.add, '-': operator.sub, '*': operator.mul,
'/': operator.truediv, '**': operator.pow}
# The full expression represented by this tree is:
# 1.0 + 2 - 3 * 4 / 5 ** 6 (= 2.999232 if you must know)
tree = ET('+', ET(1.0), ET('-', ET(2.0),
ET('*', ET(3.0), ET('/', ET(4.0),
ET('**', ET(5.0), ET(6.0))))))
def test_slice(start, stop, expected):
assert np.allclose(tree.evaluate(operators, start=start, stop=stop),
expected)
assert tree.evaluate(operators) == (1.0 + 2.0 - 3.0 * 4.0 / 5.0 ** 6.0)
test_slice(0, 5, (1.0 + 2.0 - 3.0 * 4.0 / 5.0))
test_slice(0, 4, (1.0 + 2.0 - 3.0 * 4.0))
test_slice(0, 3, (1.0 + 2.0 - 3.0))
test_slice(0, 2, (1.0 + 2.0))
test_slice(0, 1, 1.0)
test_slice(1, 6, (2.0 - 3.0 * 4.0 / 5.0 ** 6.0))
test_slice(1, 5, (2.0 - 3.0 * 4.0 / 5.0))
test_slice(1, 4, (2.0 - 3.0 * 4.0))
test_slice(1, 3, (2.0 - 3.0))
test_slice(1, 2, 2.0)
test_slice(2, 6, (3.0 * 4.0 / 5.0 ** 6.0))
test_slice(2, 5, (3.0 * 4.0 / 5.0))
test_slice(2, 4, (3.0 * 4.0))
test_slice(2, 3, 3.0)
test_slice(3, 6, (4.0 / 5.0 ** 6.0))
test_slice(3, 5, (4.0 / 5.0))
test_slice(3, 4, 4.0)
test_slice(4, 6, (5.0 ** 6.0))
test_slice(4, 5, 5.0)
test_slice(5, 6, 6.0)
def test_ellipse_extent():
# Test this properly bounds the ellipse
imshape = (100, 100)
coords = y, x = np.indices(imshape)
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi / 4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
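    # For an ellipse with semi-axes a, b rotated by theta, the axis-aligned
    # half-extents are dx = sqrt(a**2 cos(theta)**2 + b**2 sin(theta)**2)
    # and dy = sqrt(a**2 sin(theta)**2 + b**2 cos(theta)**2).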
dx, dy = ellipse_extent(a, b, theta)
limits = ((y0 - dy, y0 + dy), (x0 - dx, x0 + dx))
model.bounding_box = limits
actual = model.render(coords=coords)
expected = model(x, y)
# Check that the full ellipse is captured
np.testing.assert_allclose(expected, actual, atol=0, rtol=1)
# Check the bounding_box isn't too large
limits = np.array(limits).flatten()
for i in [0, 1]:
s = actual.sum(axis=i)
diff = np.abs(limits[2 * i] - np.where(s > 0)[0][0])
assert diff < 1
|
8f3b0647fa5abb4cc25750f2e84a88c00f2b8484f55335cbb7fd7b3f56063f9e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from .example_models import models_1D, models_2D
from astropy.modeling import fitting, models
from astropy.modeling.core import FittableModel
from astropy.modeling.polynomial import PolynomialBase
from astropy import units as u
from astropy.utils import minversion
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext
try:
import scipy
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
HAS_SCIPY_14 = HAS_SCIPY and minversion(scipy, "0.14")
@pytest.mark.skipif('not HAS_SCIPY')
def test_custom_model(amplitude=4, frequency=1):
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
        Jacobian of model function, i.e. derivative of the function with
respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
return np.vstack((da, df))
SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
y = sin_model.evaluate(x, 5., 2.)
y_prime = sin_model.fit_deriv(x, 5., 2.)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
fitter = fitting.LevMarLSQFitter()
model = fitter(sin_model, x, data)
assert np.all((np.array([model.amplitude.value, model.frequency.value]) -
np.array([amplitude, frequency])) < 0.001)
def test_custom_model_init():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2., frequency=0.5)
assert sin_model.amplitude == 2.
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
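            # Intervals are ordered with the last model input first,
            # i.e. (z, y, x) limits for inputs (x, y, z)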
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox
dz, dy, dx = np.diff(bbox) / 2
z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2, z2)
sub_arr = model(x1, y1, z1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
class Fittable2DModelTester:
"""
Test class for all two dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests
    the model with different input types, evaluates the model at different
    positions and assures that it gives the correct values, and tests whether
    the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
z = test_parameters['z_values']
assert np.all((np.abs(model(x, y) - z) < self.eval_error))
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
        # test the exception if dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
pytest.skip("Bounding_box is not defined for model.")
ylim, xlim = bbox
dy, dx = np.diff(bbox)/2
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter2D(self, model_class, test_parameters):
"""Test if the parametric model works with the fitter."""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.N)
xv, yv = np.meshgrid(x, y)
np.random.seed(0)
# add 10% noise to the amplitude
noise = np.random.rand(self.N, self.N) - 0.5
data = model(xv, yv) + 0.1 * parameters[0] * noise
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, xv, yv, data)
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
assert_allclose(fitted, expected,
atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_deriv_2D(self, model_class, test_parameters):
"""
Test the derivative of a model by fitting with an estimated and
analytical derivative.
"""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
if model_class.fit_deriv is None:
pytest.skip("Derivative function is not defined for model.")
if issubclass(model_class, PolynomialBase):
pytest.skip("Skip testing derivative of polynomials.")
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
xv, yv = np.meshgrid(x, y)
try:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
except KeyError:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.RandomState(1234567890)
amplitude = test_parameters['parameters'][0]
n = 0.1 * amplitude * (rsn.rand(self.M, self.N) - 0.5)
data = model(xv, yv) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv,
data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters,
rtol=0.1)
class Fittable1DModelTester:
"""
Test class for all one dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests
    the model with different input types, evaluates the model at different
    positions and assures that it gives the correct values, and tests whether
    the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input1D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x)
model(self.x1)
model(self.x2)
def test_eval1D(self, model_class, test_parameters):
"""
Test model values at certain given points
"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
assert_allclose(model(x), y, atol=self.eval_error)
def test_bounding_box1D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = (-5, 5)
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
del model.bounding_box
# test exception if dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = 5
try:
bbox = model.bounding_box
except NotImplementedError:
pytest.skip("Bounding_box is not defined for model.")
if isinstance(model, models.Lorentz1D):
rtol = 0.01 # 1% agreement is enough due to very extended wings
ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak
else:
rtol = 1e-7
ddx = 1
dx = np.diff(bbox) / 2
x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
arr = model(x2)
sub_arr = model(x1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter1D(self, model_class, test_parameters):
"""
Test if the parametric model works with the fitter.
"""
x_lim = test_parameters['x_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
np.random.seed(0)
        # add 1% relative noise to the data
relative_noise_amplitude = 0.01
data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) *
model(x))
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, x, data)
# Only check parameters that were free in the fit
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_deriv_1D(self, model_class, test_parameters):
"""
Test the derivative of a model by comparing results with an estimated
derivative.
"""
x_lim = test_parameters['x_lim']
if model_class.fit_deriv is None:
pytest.skip("Derivative function is not defined for model.")
if issubclass(model_class, PolynomialBase):
pytest.skip("Skip testing derivative of polynomials.")
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
parameters = test_parameters['parameters']
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.RandomState(1234567890)
n = 0.1 * parameters[0] * (rsn.rand(self.N) - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters, atol=0.15)
def create_model(model_class, test_parameters, use_constraints=True,
parameter_key='parameters'):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if 'constraints' in test_parameters:
constraints = test_parameters['constraints']
return model_class(*test_parameters[parameter_key], **constraints)
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_1D.items(), key=lambda x: str(x[0])))
class TestFittable1DModels(Fittable1DModelTester):
pass
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_2D.items(), key=lambda x: str(x[0])))
class TestFittable2DModels(Fittable2DModelTester):
pass
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
assert_equal(m(0), [42, 43])
assert_equal(m([1, 2], model_set_axis=False),
[[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
assert_equal(m(0), [0, 0])
assert_equal(m([1, 2], model_set_axis=False),
[[42, 84], [43, 86]])
def test_voigt_model():
"""
    Currently just tests that the model peaks at its center.
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>'
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0., .7, 1.4, 2.1, 3.9]
ans1 = [1., 7.3, 6.8, 6.3, 1.8]
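    # e.g. the second expected value: linear interpolation between
    # values[0] = 1 and values[1] = 10 at x = 0.7 gives 1 + 0.7 * (10 - 1) = 7.3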
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]
with pytest.raises(ValueError):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False,
fill_value=None)
assert_allclose(model(xextrap),
[1., 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,
fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))
assert_quantity_allclose(model(np.arange(5)),
[100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_interp_2d():
table = np.array([
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0., .7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array(
[-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
with pytest.raises(ValueError):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
with pytest.raises(ValueError):
model = LookupTable(lookup_table=[1, 2, 3])
with pytest.raises(NotImplementedError):
model = LookupTable(n_models=2)
with pytest.raises(ValueError):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
with pytest.raises(ValueError):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
with pytest.raises(ValueError):
model = LookupTable(points, table, bounds_error=False,
fill_value=1*u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
with pytest.raises(ValueError):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
    its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
    assert p(1) == p(1, with_bounding_box=True)
|
c3dacaf22e5b4ad5f010f981da919a2235ae7bb4f4b8b464b4fdeb1bc002a465 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests model set evaluation for some common use cases.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.modeling.models import Polynomial1D, Polynomial2D
from astropy.modeling.fitting import LinearLSQFitter
from astropy.modeling.core import Model
from astropy.modeling.parameters import Parameter
x = np.arange(4)
xx = np.array([x, x + 10])
xxx = np.arange(24).reshape((3, 4, 2))
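# Inputs of increasing dimensionality used throughout these tests:
# x has shape (4,), xx has shape (2, 4) and xxx has shape (3, 4, 2).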
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
    # standard_broadcasting = False
inputs = ('x',)
outputs = ('x',)
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(x, coeff, e):
return x*coeff + e
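# For reference, a single TParModel instance evaluates x*coeff + e directly,
# e.g. TParModel(3, 2)(4) == 4 * 3 + 2 == 14.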
def test_model_axis_1():
"""
Test that a model initialized with model_set_axis=1
can be evaluated with model_set_axis=False.
"""
model_axis = 1
n_models = 2
p1 = Polynomial1D(1, n_models=n_models, model_set_axis=model_axis)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = Polynomial1D(1, c0=2, c1=1)
t2 = Polynomial1D(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0], t1(x))
assert_allclose(y[:, 1], t2(x))
y = p1(xx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :], t1(xx))
assert_allclose(y[:, 1, :], t2(xx))
y = p1(xxx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :, :], t1(xxx))
assert_allclose(y[:, 1, :, :], t2(xxx))
def test_model_axis_2():
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
    p1 = Polynomial1D(1, c0=[[[1, 2, 3]]], c1=[[[10, 20, 30]]],
n_models=3, model_set_axis=2)
t1 = Polynomial1D(1, c0=1, c1=10)
t2 = Polynomial1D(1, c0=2, c1=20)
t3 = Polynomial1D(1, c0=3, c1=30)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape == (1, 4, 3)
assert_allclose(y[:, :, 0].flatten(), t1(x))
assert_allclose(y[:, :, 1].flatten(), t2(x))
assert_allclose(y[:, :, 2].flatten(), t3(x))
    p2 = Polynomial2D(1, c0_0=[[[0, 1, 2]]], c0_1=[[[3, 4, 5]]],
                      c1_0=[[[5, 6, 7]]], n_models=3, model_set_axis=2)
t1 = Polynomial2D(1, c0_0=0, c0_1=3, c1_0=5)
t2 = Polynomial2D(1, c0_0=1, c0_1=4, c1_0=6)
t3 = Polynomial2D(1, c0_0=2, c0_1=5, c1_0=7)
assert p2.c0_0.shape == ()
y = p2(x, x, model_set_axis=False)
assert y.shape == (1, 4, 3)
# These are columns along the 2nd axis.
assert_allclose(y[:, :, 0].flatten(), t1(x, x))
assert_allclose(y[:, :, 1].flatten(), t2(x, x))
assert_allclose(y[:, :, 2].flatten(), t3(x, x))
def test_axis_0():
"""
Test that a model initialized with model_set_axis=0
can be evaluated with model_set_axis=False.
"""
p1 = Polynomial1D(1, n_models=2, model_set_axis=0)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = Polynomial1D(1, c0=2, c1=1)
t2 = Polynomial1D(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
y = p1(xx)
assert len(y) == 2
assert_allclose(y[0], t1(xx[0]))
assert_allclose(y[1], t2(xx[1]))
y = p1(x, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(x))
assert_allclose(y[1], t2(x))
y = p1(xx, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(xx))
assert_allclose(y[1], t2(xx))
y = p1(xxx, model_set_axis=False)
assert_allclose(y[0], t1(xxx))
assert_allclose(y[1], t2(xxx))
assert len(y) == 2
def test_negative_axis():
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
    t1 = Polynomial1D(1, c0=1, c1=3)
    t2 = Polynomial1D(1, c0=2, c1=4)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
xxt = xx.T
y = p1(xxt)
    assert_allclose(y[:, 0], t1(xxt[:, 0]))
    assert_allclose(y[:, 1], t2(xxt[:, 1]))
def test_shapes():
p2 = Polynomial1D(1, n_models=3, model_set_axis=2)
assert p2.c0.shape == ()
assert p2.c1.shape == ()
p1 = Polynomial1D(1, n_models=2, model_set_axis=1)
assert p1.c0.shape == ()
assert p1.c1.shape == ()
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
assert p1.c0.shape == ()
assert p1.c1.shape == ()
e1 = [1, 2]
e2 = [3, 4]
a1 = np.array([[10, 20], [30, 40]])
a2 = np.array([[50, 60], [70, 80]])
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=-1)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([[a1, a2]], [[e1, e2]], n_models=2, model_set_axis=1)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([a1, a2], e=[1, 2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2)
assert t.e.shape == ()
def test_linearlsqfitter():
"""
Issue #7159
"""
p = Polynomial1D(1, n_models=2, model_set_axis=1)
# Generate data for fitting 2 models and re-stack them along the last axis:
y = np.array([2*x+1, x+4])
y = np.rollaxis(y, 0, -1).T
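    # y now has shape (len(x), 2) == (4, 2): one column per model, matching
    # model_set_axis=1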
f = LinearLSQFitter()
# This seems to fit the model_set correctly:
fit = f(p, x, y)
model_y = fit(x, model_set_axis=False)
m1 = Polynomial1D(1, c0=fit.c0[0][0], c1=fit.c1[0][0])
m2 = Polynomial1D(1, c0=fit.c0[0][1], c1=fit.c1[0][1])
assert_allclose(model_y[:, 0], m1(x))
assert_allclose(model_y[:, 1], m2(x))
def test_model_set_axis_outputs():
fitter = LinearLSQFitter()
model_set = Polynomial2D(1, n_models=2, model_set_axis=2)
    y2, x2 = np.mgrid[:5, :5]
    # z = np.moveaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 2)
z = np.rollaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 3)
model = fitter(model_set, x2, y2, z)
res = model(x2, y2, model_set_axis=False)
assert z.shape == res.shape
# Test initializing with integer model_set_axis
# and evaluating with a different model_set_axis
model_set = Polynomial1D(1, c0=[1, 2], c1=[2, 3],
n_models=2, model_set_axis=0)
y0 = model_set(xx)
y1 = model_set(xx.T, model_set_axis=1)
assert_allclose(y0[0], y1[:, 0])
assert_allclose(y0[1], y1[:, 1])
model_set = Polynomial1D(1, c0=[[1, 2]], c1=[[2, 3]],
n_models=2, model_set_axis=1)
y0 = model_set(xx.T)
y1 = model_set(xx, model_set_axis=0)
assert_allclose(y0[:, 0], y1[0])
assert_allclose(y0[:, 1], y1[1])
with pytest.raises(ValueError):
model_set(x)
|
2401097dc23a91917ac8a9c54e0ea06767c9a239c67cbeb7463b6418f30ca041 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.wcs import wcs
from astropy.modeling import models
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg)
c2n = models.RotateCelestial2Native(crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(40 * u.deg, -0.057 * u.rad), (21.5 * u.arcsec, 45.9 * u.deg)])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42 * u.deg, (43 * u.deg).to(u.arcsec), (44 * u.deg).to(u.rad)
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_quantity_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13 * u.deg)
assert_quantity_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13 * u.deg)
def test_Rotation2D():
model = models.Rotation2D(angle=90 * u.deg)
a, b = 1 * u.deg, 0 * u.deg
x, y = model(a, b)
assert_quantity_allclose([x, y], [0 * u.deg, 1 * u.deg], atol=1e-10 * u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494 * u.deg)
x, y = model.inverse(*model(1 * u.deg, 0 * u.deg))
assert_quantity_allclose([x, y], [1 * u.deg, 0 * u.deg], atol=1e-10 * u.deg)
def test_euler_angle_rotations():
ydeg = (90 * u.deg, 0 * u.deg)
y = (90, 0)
z = (0, 90)
# rotate y into minus z
model = models.EulerAngleRotation(0 * u.rad, np.pi / 2 * u.rad, 0 * u.rad, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
model = models.EulerAngleRotation(0 * u.deg, 90 * u.deg, 0 * u.deg, 'zxz')
assert_quantity_allclose(model(*(z * u.deg)), ydeg, atol=10**-12 * u.deg)
@pytest.mark.parametrize(('params'), [(60, 10, 25),
(60 * u.deg, 10 * u.deg, 25 * u.deg),
((60 * u.deg).to(u.rad),
(10 * u.deg).to(u.rad),
(25 * u.deg).to(u.rad))])
def test_euler_rotations_with_units(params):
x = 1 * u.deg
y = 1 * u.deg
phi, theta, psi = params
urot = models.EulerAngleRotation(phi, theta, psi, axes_order='xyz')
a, b = urot(x.value, y.value)
assert_allclose((a, b), (-23.614457631192547, 9.631254579686113))
a, b = urot(x, y)
assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg))
a, b = urot(x.to(u.rad), y.to(u.rad))
assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg))
def test_attributes():
n2c = models.RotateNative2Celestial(20016 * u.arcsec, -72.3 * u.deg, np.pi * u.rad)
assert_allclose(n2c.lat.value, -72.3)
assert_allclose(n2c.lat._raw_value, -1.2618730491919001)
assert_allclose(n2c.lon.value, 20016)
assert_allclose(n2c.lon._raw_value, 0.09704030641088472)
assert_allclose(n2c.lon_pole.value, np.pi)
assert_allclose(n2c.lon_pole._raw_value, np.pi)
    assert n2c.lon.unit is u.Unit("arcsec")
    assert n2c._param_metrics['lon']['raw_unit'] is u.Unit("rad")
    assert n2c.lat.unit is u.Unit("deg")
    assert n2c._param_metrics['lat']['raw_unit'] is u.Unit("rad")
    assert n2c.lon_pole.unit is u.Unit("rad")
    assert n2c._param_metrics['lon_pole']['raw_unit'] is u.Unit("rad")
|
0afc331ab97f0363031f0e6b3070d11759244bca8326cc429a1950a315fe11c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to evaluating models with quantity parameters
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling.core import Model
from astropy.modeling.models import Gaussian1D, Shift, Scale, Pix2Sky_TAN
from astropy import units as u
from astropy.units import UnitsError
from astropy.tests.helper import assert_quantity_allclose
# We start off by taking some simple cases where the units are defined by
# whatever the model is initialized with, and we check that the model evaluation
# returns quantities.
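# A minimal illustrative sketch (not collected by pytest) of the behaviour
# exercised below: a model initialized with Quantity parameters accepts
# Quantity input in equivalent units and returns a Quantity.
def _example_quantity_evaluation():
    gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
    result = gq(100 * u.cm)  # 100 cm converts to 1 m, the mean, so ~1 J
    assert isinstance(result, u.Quantity) and result.unit == u.J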
def test_evaluate_with_quantities():
"""
Test evaluation of a single model with Quantity parameters that do
not explicitly require units.
"""
# We create two models here - one with quantities, and one without. The one
# without is used to create the reference values for comparison.
g = Gaussian1D(1, 1, 0.1)
gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# We first check that calling the Gaussian with quantities returns the
# expected result
assert_quantity_allclose(gq(1 * u.m), g(1) * u.J)
# Units have to be specified for the Gaussian with quantities - if not, an
# error is raised
with pytest.raises(UnitsError) as exc:
gq(1)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', (dimensionless), could not be "
"converted to required input units of m (length)")
# However, zero is a special case
assert_quantity_allclose(gq(0), g(0) * u.J)
# We can also evaluate models with equivalent units
assert_allclose(gq(0.0005 * u.km).value, g(0.5))
# But not with incompatible units
with pytest.raises(UnitsError) as exc:
gq(3 * u.s)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', s (time), could not be "
"converted to required input units of m (length)")
# We also can't evaluate the model without quantities with a quantity
with pytest.raises(UnitsError) as exc:
g(3 * u.m)
# TODO: determine what error message should be here
# assert exc.value.args[0] == ("Units of input 'x', m (length), could not be "
# "converted to required dimensionless input")
def test_evaluate_with_quantities_and_equivalencies():
"""
We now make sure that equivalencies are correctly taken into account
"""
g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm)
# We aren't setting the equivalencies, so this won't work
with pytest.raises(UnitsError) as exc:
g(30 * u.PHz)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', PHz (frequency), could "
"not be converted to required input units of "
"nm (length)")
# But it should now work if we pass equivalencies when evaluating
assert_quantity_allclose(g(30 * u.PHz, equivalencies={'x': u.spectral()}),
g(9.993081933333332 * u.nm))
class MyTestModel(Model):
inputs = ('a', 'b')
outputs = ('f',)
def evaluate(self, a, b):
print('a', a)
print('b', b)
return a * b
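# For reference: MyTestModel()(3, 5) returns 15, and units on either input
# simply propagate through the multiplication in evaluate.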
class TestInputUnits():
def setup_method(self, method):
self.model = MyTestModel()
def test_evaluate(self):
# We should be able to evaluate with anything
assert_quantity_allclose(self.model(3, 5), 15)
assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m)
assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg)
def test_input_units(self):
self.model._input_units = {'a': u.deg}
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', s (time), could not be "
"converted to required input units of deg (angle)")
with pytest.raises(UnitsError) as exc:
self.model(3, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', (dimensionless), could "
"not be converted to required input units of deg (angle)")
def test_input_units_allow_dimensionless(self):
self.model._input_units = {'a': u.deg}
self.model._input_units_allow_dimensionless = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', s (time), could not be "
"converted to required input units of deg (angle)")
assert_quantity_allclose(self.model(3, 3), 9)
def test_input_units_strict(self):
self.model._input_units = {'a': u.deg}
self.model._input_units_strict = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
result = self.model(np.pi * u.rad, 2)
assert_quantity_allclose(result, 360 * u.deg)
assert result.unit is u.deg
def test_input_units_equivalencies(self):
self.model._input_units = {'a': u.micron}
with pytest.raises(UnitsError) as exc:
self.model(3 * u.PHz, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', PHz (frequency), could "
"not be converted to required input units of "
"micron (length)")
self.model.input_units_equivalencies = {'a': u.spectral()}
assert_quantity_allclose(self.model(3 * u.PHz, 3),
3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()))
def test_return_units(self):
self.model._input_units = {'a': u.deg}
self.model._return_units = {'f': u.rad}
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_return_units_scalar(self):
# Check that return_units also works when giving a single unit since
# there is only one output, so is unambiguous.
self.model._input_units = {'a': u.deg}
self.model._return_units = u.rad
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_and_input_units():
"""
    Test units passed to both models joined with the '&' (parallel) operator.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.arcsecond, 20 * u.arcsecond)
assert_quantity_allclose(out[0], 10 * u.deg + 10 * u.arcsec)
assert_quantity_allclose(out[1], 10 * u.deg + 20 * u.arcsec)
def test_plus_input_units():
"""
    Test units passed to models combined with the '+' operator.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 + s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 20 * u.arcsec)
def test_compound_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 10 * u.arcsec)
def test_compound_input_units_fail():
"""
    Test incompatible input units for the first model in a chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_incompatible_units_fail():
"""
    Test models with incompatible units in a chain.
"""
s1 = Shift(10 * u.pix)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_pipe_equiv_call():
"""
Check that equivalencies work when passed to evaluate, for a chained model
(which has one input).
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.pix, equivalencies={'x': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out, 25 * u.deg)
def test_compound_and_equiv_call():
"""
    Check that equivalencies work when passed to evaluate, for a composite model
with two inputs.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.pix, 10 * u.pix, equivalencies={'x0': u.pixel_scale(0.5 * u.deg / u.pix),
'x1': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out[0], 15 * u.deg)
assert_quantity_allclose(out[1], 15 * u.deg)
def test_compound_input_units_equivalencies():
"""
Test setting input_units_equivalencies on one of the models.
"""
s1 = Shift(10 * u.deg)
s1.input_units_equivalencies = {'x': u.pixel_scale(0.5 * u.deg / u.pix)}
s2 = Shift(10 * u.deg)
sp = Shift(10 * u.pix)
cs = s1 | s2
out = cs(10 * u.pix)
assert_quantity_allclose(out, 25 * u.deg)
cs = sp | s1
out = cs(10 * u.pix)
assert_quantity_allclose(out, 20 * u.deg)
cs = s1 & s2
cs = cs.rename('TestModel')
out = cs(20 * u.pix, 10 * u.deg)
assert_quantity_allclose(out, 20 * u.deg)
with pytest.raises(UnitsError) as exc:
out = cs(20 * u.pix, 10 * u.pix)
assert exc.value.args[0] == "TestModel: Units of input 'x1', pix (unknown), could not be converted to required input units of deg (angle)"
def test_compound_input_units_strict():
"""
Test setting input_units_strict on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s2 = Scale(2)
cs = s1 | s2
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s2 | s1
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s1 & s2
out = cs(10 * u.arcsec, 10 * u.arcsec)
assert_quantity_allclose(out, 20 * u.arcsec)
assert out[0].unit is u.deg
assert out[1].unit is u.arcsec
def test_compound_input_units_allow_dimensionless():
"""
Test setting input_units_allow_dimensionless on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s1._input_units_allow_dimensionless = True
s2 = Scale(2)
cs = s1 | s2
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == "TestModel: Units of input 'x', m (length), could not be converted to required input units of deg (angle)"
s1._input_units_allow_dimensionless = False
cs = s1 | s2
cs = cs.rename('TestModel')
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == "TestModel: Units of input 'x', (dimensionless), could not be converted to required input units of deg (angle)"
s1._input_units_allow_dimensionless = True
cs = s2 | s1
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == "ScaleDegrees: Units of input 'x', m (length), could not be converted to required input units of deg (angle)"
s1._input_units_allow_dimensionless = False
cs = s2 | s1
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == "ScaleDegrees: Units of input 'x', (dimensionless), could not be converted to required input units of deg (angle)"
s1._input_units_allow_dimensionless = True
s1 = ScaleDegrees(2)
s1._input_units_allow_dimensionless = True
s2 = ScaleDegrees(2)
s2._input_units_allow_dimensionless = False
cs = s1 & s2
cs = cs.rename('TestModel')
out = cs(10, 10 * u.arcsec)
assert_quantity_allclose(out[0], 20 * u.one)
assert_quantity_allclose(out[1], 20 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10, 10)
assert exc.value.args[0] == "TestModel: Units of input 'x1', (dimensionless), could not be converted to required input units of deg (angle)"
def test_compound_return_units():
"""
Test that return_units on the first model in the chain is respected for the
input to the second.
"""
class PassModel(Model):
inputs = ('x', 'y')
outputs = ('x', 'y')
@property
def input_units(self):
""" Input units. """
return {'x': u.deg, 'y': u.deg}
@property
def return_units(self):
""" Output units. """
return {'x': u.deg, 'y': u.deg}
def evaluate(self, x, y):
return x.value, y.value
cs = Pix2Sky_TAN() | PassModel()
assert_quantity_allclose(cs(0*u.deg, 0*u.deg), (0, 90)*u.deg)
|
570f7d79f225883b8b848d61ca3bd801c9b0220bdcf6412898a4f172a6b53161 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to fitting models with quantity parameters
"""
import numpy as np
import pytest
from astropy.modeling import models
from astropy import units as u
from astropy.units import UnitsError
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext
from astropy.modeling import fitting
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
# Fitting should be as intuitive as possible to the user. Essentially, models
# and fitting should work without units, but if one has units, the other should
# have units too, and the resulting fitted parameters will also have units.
def _fake_gaussian_data():
# Generate fake data
with NumpyRNGContext(12345):
x = np.linspace(-5., 5., 2000)
y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
y += np.random.normal(0., 0.2, x.shape)
# Attach units to data
x = x * u.m
y = y * u.Jy
return x, y
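# A minimal illustrative sketch (not collected by pytest, and assuming SciPy
# is available) of the behaviour tested below: fitting Quantity data returns
# a model whose parameters carry the data's units.
def _example_fitting_with_units():
    x, y = _fake_gaussian_data()
    fit = fitting.LevMarLSQFitter()
    g = fit(models.Gaussian1D(1 * u.Jy, 1 * u.m, 1 * u.m), x, y)
    return g.mean.quantity  # approximately 1.3 m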
compound_models_no_units = [models.Linear1D() + models.Gaussian1D() | models.Scale(),
models.Linear1D() + models.Gaussian1D() + models.Gaussian1D(),
models.Linear1D() + models.Gaussian1D() | models.Shift(),
]
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_simple():
x, y = _fake_gaussian_data()
# Fit the data using a Gaussian with units
g_init = models.Gaussian1D()
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
# TODO: update actual numerical results once implemented, but these should
# be close to the values below.
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_with_initial_values():
x, y = _fake_gaussian_data()
# Fit the data using a Gaussian with units
g_init = models.Gaussian1D(amplitude=1. * u.mJy,
mean=3 * u.cm,
stddev=2 * u.mm)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
# TODO: update actual numerical results once implemented, but these should
# be close to the values below.
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_missing_data_units():
"""
Raise an error if the model has units but the data doesn't
"""
g_init = models.Gaussian1D(amplitude=1. * u.mJy,
mean=3 * u.cm,
stddev=2 * u.mm)
fit_g = fitting.LevMarLSQFitter()
with pytest.raises(UnitsError) as exc:
fit_g(g_init, [1, 2, 3], [4, 5, 6])
assert exc.value.args[0] == ("'cm' (length) and '' (dimensionless) are not "
"convertible")
with pytest.raises(UnitsError) as exc:
fit_g(g_init, [1, 2, 3] * u.m, [4, 5, 6])
assert exc.value.args[0] == ("'mJy' (spectral flux density) and '' "
"(dimensionless) are not convertible")
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_missing_model_units():
"""
Proceed if the data has units but the model doesn't
"""
x, y = _fake_gaussian_data()
g_init = models.Gaussian1D(amplitude=1., mean=3, stddev=2)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
g_init = models.Gaussian1D(amplitude=1., mean=3 * u.m, stddev=2 * u.m)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_incompatible_units():
"""
Raise an error if the data and model have incompatible units
"""
g_init = models.Gaussian1D(amplitude=1. * u.Jy,
mean=3 * u.m,
stddev=2 * u.cm)
fit_g = fitting.LevMarLSQFitter()
with pytest.raises(UnitsError) as exc:
fit_g(g_init, [1, 2, 3] * u.Hz, [4, 5, 6] * u.Jy)
assert exc.value.args[0] == ("'Hz' (frequency) and 'm' (length) are not convertible")
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('model', compound_models_no_units)
def test_compound_without_units(model):
x = np.linspace(-5, 5, 10) * u.Angstrom
with NumpyRNGContext(12345):
y = np.random.sample(10)
fitter = fitting.LevMarLSQFitter()
res_fit = fitter(model, x, y * u.Hz)
assert all([res_fit[i]._has_units for i in range(3)])
z = res_fit(x)
assert isinstance(z, u.Quantity)
res_fit = fitter(model, np.arange(10) * u.Unit('Angstrom'), y)
assert all([res_fit[i]._has_units for i in range(3)])
z = res_fit(x)
assert isinstance(z, np.ndarray)
@pytest.mark.skipif('not HAS_SCIPY')
def test_compound_fitting_with_units():
x = np.linspace(-5, 5, 15) * u.Angstrom
y = np.linspace(-5, 5, 15) * u.Angstrom
fitter = fitting.LevMarLSQFitter()
m = models.Gaussian2D(10*u.Hz,
3*u.Angstrom, 4*u.Angstrom,
1*u.Angstrom, 2*u.Angstrom)
p = models.Planar2D(3*u.Hz/u.Angstrom, 4*u.Hz/u.Angstrom, 1*u.Hz)
model = m + p
z = model(x, y)
res = fitter(model, x, y, z)
assert isinstance(res(x, y), np.ndarray)
assert all([res[i]._has_units for i in range(2)])
model = models.Gaussian2D() + models.Planar2D()
res = fitter(model, x, y, z)
assert isinstance(res(x, y), np.ndarray)
assert all([res[i]._has_units for i in range(2)])
|
c66e6d3c24da616ab477805cc2d1c0112123e3c1d1e5afeb76bc2d9018894ede | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests fitting and model evaluation with various inputs
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.modeling import models
from astropy.modeling import fitting
from astropy.modeling.core import Model, FittableModel, Fittable1DModel
from astropy.modeling.parameters import Parameter
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
model1d_params = [
(models.Polynomial1D, [2]),
(models.Legendre1D, [2]),
(models.Chebyshev1D, [2]),
(models.Shift, [2]),
(models.Scale, [2])
]
model2d_params = [
(models.Polynomial2D, [2]),
(models.Legendre2D, [1, 2]),
(models.Chebyshev2D, [1, 2])
]
class TestInputType:
"""
This class tests that models accept numbers, lists and arrays.
Add new models to one of the lists above to test for this.
"""
def setup_class(self):
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.parametrize(('model', 'params'), model1d_params)
def test_input1D(self, model, params):
m = model(*params)
m(self.x)
m(self.x1)
m(self.x2)
@pytest.mark.parametrize(('model', 'params'), model2d_params)
def test_input2D(self, model, params):
m = model(*params)
m(self.x, self.y)
m(self.x1, self.y1)
m(self.x2, self.y2)
class TestFitting:
"""Test various input options to fitting routines."""
def setup_class(self):
self.x1 = np.arange(10)
self.y, self.x = np.mgrid[:10, :10]
def test_linear_fitter_1set(self):
"""1 set 1D x, 1pset"""
expected = np.array([0, 1, 1, 1])
p1 = models.Polynomial1D(3)
p1.parameters = [0, 1, 1, 1]
y1 = p1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.parameters, expected, atol=10 ** (-7))
def test_linear_fitter_Nset(self):
"""1 set 1D x, 2 sets 1D y, 2 param_sets"""
expected = np.array([[0, 0], [1, 1], [2, 2], [3, 3]])
p1 = models.Polynomial1D(3, n_models=2)
p1.parameters = [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0]
params = {}
for i in range(4):
params[p1.param_names[i]] = [i, i]
p1 = models.Polynomial1D(3, model_set_axis=0, **params)
y1 = p1(self.x1, model_set_axis=False)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-7))
def test_linear_fitter_1dcheb(self):
"""1 pset, 1 set 1D x, 1 set 1D y, Chebyshev 1D polynomial"""
expected = np.array(
[[2817.2499999999995,
4226.6249999999991,
1680.7500000000009,
273.37499999999926]]).T
ch1 = models.Chebyshev1D(3)
ch1.parameters = [0, 1, 2, 3]
y1 = ch1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(ch1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-2))
def test_linear_fitter_1dlegend(self):
"""
1 pset, 1 set 1D x, 1 set 1D y, Legendre 1D polynomial
"""
expected = np.array(
[[1925.5000000000011,
3444.7500000000005,
1883.2500000000014,
364.4999999999996]]).T
leg1 = models.Legendre1D(3)
leg1.parameters = [1, 2, 3, 4]
y1 = leg1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(leg1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-12))
def test_linear_fitter_1set2d(self):
p2 = models.Polynomial2D(2)
p2.parameters = [0, 1, 2, 3, 4, 5]
expected = [0, 1, 2, 3, 4, 5]
z = p2(self.x, self.y)
pfit = fitting.LinearLSQFitter()
model = pfit(p2, self.x, self.y, z)
assert_allclose(model.parameters, expected, atol=10 ** (-12))
assert_allclose(model(self.x, self.y), z, atol=10 ** (-12))
def test_wrong_numpset(self):
"""
        A ValueError is raised if 1 data set (1d x, 1d y) is fit
        with a model with multiple parameter sets.
"""
with pytest.raises(ValueError):
p1 = models.Polynomial1D(5)
y1 = p1(self.x1)
p1 = models.Polynomial1D(5, n_models=2)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
def test_wrong_pset(self):
"""A case of 1 set of x and multiple sets of y and parameters."""
expected = np.array([[1., 0],
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[1, 5]])
p1 = models.Polynomial1D(5, n_models=2)
params = {}
for i in range(6):
params[p1.param_names[i]] = [1, i]
p1 = models.Polynomial1D(5, model_set_axis=0, **params)
y1 = p1(self.x1, model_set_axis=False)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-7))
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_1set_1d(self):
"""1 set 1D x, 1 set 1D y, 1 pset NonLinearFitter"""
g1 = models.Gaussian1D(10, mean=3, stddev=.2)
y1 = g1(self.x1)
gfit = fitting.LevMarLSQFitter()
model = gfit(g1, self.x1, y1)
assert_allclose(model.parameters, [10, 3, .2])
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_Nset_1d(self):
"""1 set 1D x, 1 set 1D y, 2 param_sets, NonLinearFitter"""
with pytest.raises(ValueError):
g1 = models.Gaussian1D([10.2, 10], mean=[3, 3.2], stddev=[.23, .2],
n_models=2)
y1 = g1(self.x1, model_set_axis=False)
gfit = fitting.LevMarLSQFitter()
model = gfit(g1, self.x1, y1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_1set_2d(self):
"""1 set 2d x, 1set 2D y, 1 pset, NonLinearFitter"""
g2 = models.Gaussian2D(10, x_mean=3, y_mean=4, x_stddev=.3,
y_stddev=.2, theta=0)
z = g2(self.x, self.y)
gfit = fitting.LevMarLSQFitter()
model = gfit(g2, self.x, self.y, z)
assert_allclose(model.parameters, [10, 3, 4, .3, .2, 0])
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_Nset_2d(self):
"""1 set 2d x, 1set 2D y, 2 param_sets, NonLinearFitter"""
with pytest.raises(ValueError):
g2 = models.Gaussian2D([10, 10], [3, 3], [4, 4], x_stddev=[.3, .3],
y_stddev=[.2, .2], theta=[0, 0], n_models=2)
z = g2(self.x.flatten(), self.y.flatten())
gfit = fitting.LevMarLSQFitter()
model = gfit(g2, self.x, self.y, z)
class TestEvaluation:
"""
    Test various input options to model evaluation.
    TestFitting above already covers evaluation of polynomials.
"""
def setup_class(self):
self.x1 = np.arange(20)
self.y, self.x = np.mgrid[:10, :10]
def test_non_linear_NYset(self):
"""
This case covers:
        N param sets, 1 set 1D x --> N 1D y data
"""
g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2)
y1 = g1(self.x1, model_set_axis=False)
assert np.all((y1[0, :] - y1[1, :]).nonzero() == np.array([]))
def test_non_linear_NXYset(self):
"""
        This case covers: N param sets, N sets 1D x --> N sets 1D y data
"""
g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2)
xx = np.array([self.x1, self.x1])
y1 = g1(xx)
assert_allclose(y1[:, 0], y1[:, 1], atol=10 ** (-12))
def test_p1_1set_1pset(self):
"""1 data set, 1 pset, Polynomial1D"""
p1 = models.Polynomial1D(4)
y1 = p1(self.x1)
assert y1.shape == (20,)
def test_p1_nset_npset(self):
"""N data sets, N param_sets, Polynomial1D"""
p1 = models.Polynomial1D(4, n_models=2)
y1 = p1(np.array([self.x1, self.x1]).T, model_set_axis=-1)
assert y1.shape == (20, 2)
assert_allclose(y1[0, :], y1[1, :], atol=10 ** (-12))
def test_p2_1set_1pset(self):
"""1 pset, 1 2D data set, Polynomial2D"""
p2 = models.Polynomial2D(5)
z = p2(self.x, self.y)
assert z.shape == (10, 10)
def test_p2_nset_npset(self):
"""N param_sets, N 2D data sets, Poly2d"""
p2 = models.Polynomial2D(5, n_models=2)
xx = np.array([self.x, self.x])
yy = np.array([self.y, self.y])
z = p2(xx, yy)
assert z.shape == (2, 10, 10)
def test_nset_domain(self):
"""
Test model set with negative model_set_axis.
In this case model_set_axis=-1 is identical to model_set_axis=1.
"""
xx = np.array([self.x1, self.x1]).T
xx[0, 0] = 100
xx[1, 0] = 100
xx[2, 0] = 99
p1 = models.Polynomial1D(5, c0=[1, 2], c1=[3, 4], n_models=2)
yy = p1(xx, model_set_axis=-1)
assert_allclose(xx.shape, yy.shape)
yy1 = p1(xx, model_set_axis=1)
assert_allclose(yy, yy1)
#x1 = xx[:, 0]
#x2 = xx[:, 1]
#p1 = models.Polynomial1D(5)
#assert_allclose(p1(x1), yy[0, :], atol=10 ** (-12))
#p1 = models.Polynomial1D(5)
#assert_allclose(p1(x2), yy[1, :], atol=10 ** (-12))
def test_evaluate_gauss2d(self):
cov = np.array([[1., 0.8], [0.8, 3]])
g = models.Gaussian2D(1., 5., 4., cov_matrix=cov)
y, x = np.mgrid[:10, :10]
g(x, y)
class TModel_1_1(Fittable1DModel):
p1 = Parameter()
p2 = Parameter()
@staticmethod
def evaluate(x, p1, p2):
return x + p1 + p2
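# For reference: TModel_1_1(1, 10)(100) evaluates 100 + 1 + 10 == 111, and
# TModel_1_1([1, 2], [10, 20], n_models=2)(100) broadcasts to [111, 122].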
class TestSingleInputSingleOutputSingleModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=1.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_1(1, 10)
y = t(100)
assert isinstance(y, float)
assert np.ndim(y) == 0
assert y == 111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3)
assert np.all(y == [[11, 111, 211],
[311, 411, 511]])
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3, 2)
assert np.all(y == [[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]]])
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y = t(100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([[100], [200]])
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3 = t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[1, 2], [3, 4]], [[10, 20], [30, 40]])
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
y2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == (2, 2, 2, 2)
assert np.all(y2 == [[[[111, 122], [133, 144]],
[[211, 222], [233, 244]]],
[[[311, 322], [333, 344]],
[[411, 422], [433, 444]]]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3 = t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[1, 2, 3])
y1 = t([10, 20, 30])
assert np.shape(y1) == (2, 2, 3)
assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])
y2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == (3, 2, 2, 3)
assert_allclose(y2, [[[[11.01, 12.02, 13.03],
[11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09],
[11.10, 12.11, 13.12]]],
[[[21.01, 22.02, 23.03],
[21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09],
[21.10, 22.11, 23.12]]],
[[[31.01, 32.02, 33.03],
[31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09],
[31.10, 32.11, 33.12]]]])
class TestSingleInputSingleOutputTwoModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=2.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
With n_models=2 all outputs should have a first dimension of size 2 (unless
defined with model_set_axis != 0).
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a 1-D array with
size equal to the number of models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
y = t(100)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_scalar_parameters_1d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
with pytest.raises(ValueError):
y = t(np.arange(5) * 100)
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([100, 200], model_set_axis=False)
        # With model_set_axis=False the full input [100, 200] is evaluated by
        # each model, rather than the first model taking 100 and the second
        # model taking 200
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 211], [122, 222]])
y3 = t([100, 200, 300], model_set_axis=False)
assert np.shape(y3) == (2, 3)
assert np.all(y3 == [[111, 211, 311], [122, 222, 322]])
def test_scalar_parameters_2d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
y1 = t(np.arange(6).reshape(2, 3) * 100)
assert np.shape(y1) == (2, 3)
assert np.all(y1 == [[11, 111, 211],
[322, 422, 522]])
y2 = t(np.arange(6).reshape(2, 3) * 100, model_set_axis=False)
assert np.shape(y2) == (2, 2, 3)
assert np.all(y2 == [[[11, 111, 211], [311, 411, 511]],
[[22, 122, 222], [322, 422, 522]]])
def test_scalar_parameters_3d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
data = np.arange(12).reshape(2, 3, 2) * 100
y1 = t(data)
assert np.shape(y1) == (2, 3, 2)
assert np.all(y1 == [[[11, 111], [211, 311], [411, 511]],
[[622, 722], [822, 922], [1022, 1122]]])
y2 = t(data, model_set_axis=False)
assert np.shape(y2) == (2, 2, 3, 2)
assert np.all(y2 == np.array([data + 11, data + 22]))
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]], n_models=2)
y = t(100)
assert np.shape(y) == (2, 3)
assert np.all(y == [[111, 122, 133], [144, 155, 166]])
def test_1d_array_parameters_1d_array_input(self):
"""
When the input is an array, if model_set_axis=False then it must
broadcast with the shapes of the parameters (excluding the
model_set_axis).
Otherwise all dimensions must be broadcastable.
"""
t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]], n_models=2)
with pytest.raises(ValueError):
y1 = t([100, 200, 300])
y1 = t([100, 200])
assert np.shape(y1) == (2, 3)
assert np.all(y1 == [[111, 122, 133], [244, 255, 266]])
with pytest.raises(ValueError):
# Doesn't broadcast with the shape of the parameters, (3,)
y2 = t([100, 200], model_set_axis=False)
y2 = t([100, 200, 300], model_set_axis=False)
assert np.shape(y2) == (2, 3)
assert np.all(y2 == [[111, 222, 333],
[144, 255, 366]])
def test_2d_array_parameters_2d_array_input(self):
t = TModel_1_1([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
n_models=2)
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2, 2)
assert np.all(y1 == [[[111, 222], [133, 244]],
[[355, 466], [377, 488]]])
with pytest.raises(ValueError):
y2 = t([[100, 200, 300], [400, 500, 600]])
y2 = t([[[100, 200], [300, 400]], [[500, 600], [700, 800]]])
assert np.shape(y2) == (2, 2, 2)
assert np.all(y2 == [[[111, 222], [333, 444]],
[[555, 666], [777, 888]]])
def test_mixed_array_parameters_1d_array_input(self):
t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
with pytest.raises(ValueError):
y = t([10, 20, 30])
y = t([10, 20, 30], model_set_axis=False)
assert np.shape(y) == (2, 2, 3)
assert_allclose(y, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[14.07, 25.08, 36.09], [14.10, 25.11, 36.12]]])
class TModel_1_2(FittableModel):
inputs = ('x',)
outputs = ('y', 'z')
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
@staticmethod
def evaluate(x, p1, p2, p3):
return (x + p1 + p2, x + p1 + p2 + p3)
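# Quick illustrative check (a sketch using the toy model just defined): with
# scalar parameters, both outputs of TModel_1_2 follow the input's shape.
def _tmodel_1_2_sketch():
    t = TModel_1_2(1, 10, 1000)
    y, z = t(np.array([1.0, 2.0]))
    assert np.shape(y) == np.shape(z) == (2,)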
class TestSingleInputDoubleOutputSingleModel:
"""
    A suite of tests to check various cases of parameter and input combinations
    on a toy model with n_inputs = 1 but n_outputs = 2, and n_models=1.
    As of this writing there are not enough controls to adjust how outputs from
    such a model should be formatted (currently the shapes of outputs are
    assumed to be directly associated with the shapes of corresponding inputs
    when n_inputs == n_outputs). For now, the approach taken for cases like
    this is to assume all outputs should have the same format.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(100)
assert isinstance(y, float)
assert isinstance(z, float)
assert np.ndim(y) == np.ndim(z) == 0
assert y == 111
assert z == 1111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
assert np.all(z == (y + 1000))
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2, 3)
assert np.all(y == [[11, 111, 211],
[311, 411, 511]])
assert np.all(z == (y + 1000))
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2, 3, 2)
assert np.all(y == [[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]]])
assert np.all(z == (y + 1000))
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_2([1, 2], [10, 20], [1000, 2000])
y, z = t(100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2,)
assert np.all(y == [111, 122])
assert np.all(z == [1111, 2122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([1, 2], [10, 20], [1000, 2000])
y1, z1 = t([100, 200])
assert np.shape(y1) == np.shape(z1) == (2,)
assert np.all(y1 == [111, 222])
assert np.all(z1 == [1111, 2222])
y2, z2 = t([[100], [200]])
assert np.shape(y2) == np.shape(z2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
assert np.all(z2 == [[1111, 2122], [1211, 2222]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3, z3 = t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([[1, 2], [3, 4]], [[10, 20], [30, 40]],
[[1000, 2000], [3000, 4000]])
y1, z1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == np.shape(z1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
assert np.all(z1 == [[1111, 2222], [3333, 4444]])
y2, z2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == np.shape(z2) == (2, 2, 2, 2)
assert np.all(y2 == [[[[111, 122], [133, 144]],
[[211, 222], [233, 244]]],
[[[311, 322], [333, 344]],
[[411, 422], [433, 444]]]])
assert np.all(z2 == [[[[1111, 2122], [3133, 4144]],
[[1211, 2222], [3233, 4244]]],
[[[1311, 2322], [3333, 4344]],
[[1411, 2422], [3433, 4444]]]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3, z3 = t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[1, 2, 3], [100, 200, 300])
y1, z1 = t([10, 20, 30])
assert np.shape(y1) == np.shape(z1) == (2, 2, 3)
assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])
assert_allclose(z1, [[[111.01, 222.02, 333.03],
[111.04, 222.05, 333.06]],
[[111.07, 222.08, 333.09],
[111.10, 222.11, 333.12]]])
y2, z2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == np.shape(z2) == (3, 2, 2, 3)
assert_allclose(y2, [[[[11.01, 12.02, 13.03],
[11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09],
[11.10, 12.11, 13.12]]],
[[[21.01, 22.02, 23.03],
[21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09],
[21.10, 22.11, 23.12]]],
[[[31.01, 32.02, 33.03],
[31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09],
[31.10, 32.11, 33.12]]]])
assert_allclose(z2, [[[[111.01, 212.02, 313.03],
[111.04, 212.05, 313.06]],
[[111.07, 212.08, 313.09],
[111.10, 212.11, 313.12]]],
[[[121.01, 222.02, 323.03],
[121.04, 222.05, 323.06]],
[[121.07, 222.08, 323.09],
[121.10, 222.11, 323.12]]],
[[[131.01, 232.02, 333.03],
[131.04, 232.05, 333.06]],
[[131.07, 232.08, 333.09],
[131.10, 232.11, 333.12]]]])
class TInputFormatter(Model):
"""
A toy model to test input/output formatting.
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
@staticmethod
def evaluate(x, y):
return x, y
def test_format_input_scalars():
model = TInputFormatter()
result = model(1, 2)
assert result == (1, 2)
def test_format_input_arrays():
model = TInputFormatter()
result = model([1, 1], [2, 2])
assert_allclose(result, (np.array([1, 1]), np.array([2, 2])))
def test_format_input_arrays_transposed():
model = TInputFormatter()
input = np.array([[1, 1]]).T, np.array([[2, 2]]).T
result = model(*input)
assert_allclose(result, input)
|
cb37b625ea81347b75cc45fd30de96f2aa8cdef948117b12893e7182862c2a4e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to using quantities/units on parameters of models.
"""
import numpy as np
import pytest
from astropy.modeling.core import Model, Fittable1DModel, InputParameterError
from astropy.modeling.parameters import Parameter, ParameterDefinitionError
from astropy.modeling.models import (Gaussian1D, Pix2Sky_TAN, RotateNative2Celestial,
Rotation2D)
from astropy import units as u
from astropy.units import UnitsError
from astropy.tests.helper import assert_quantity_allclose
from astropy import coordinates as coord
class BaseTestModel(Fittable1DModel):
@staticmethod
def evaluate(x, a):
return x
def test_parameter_quantity():
"""
Basic tests for initializing general models (that do not require units)
with parameters that have units attached.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
assert g.amplitude.value == 1.0
assert g.amplitude.unit is u.J
assert g.mean.value == 1.0
assert g.mean.unit is u.m
assert g.stddev.value == 0.1
assert g.stddev.unit is u.m
def test_parameter_set_quantity():
"""
Make sure that parameters that start off as quantities can be set to any
other quantity, regardless of whether the units of the new quantity are
compatible with the original ones.
We basically leave it up to the evaluate method to raise errors if there
are issues with incompatible units, and we don't check for consistency
at the parameter level.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Try equivalent units
g.amplitude = 4 * u.kJ
assert_quantity_allclose(g.amplitude, 4 * u.kJ)
g.mean = 3 * u.km
assert_quantity_allclose(g.mean, 3 * u.km)
g.stddev = 2 * u.mm
assert_quantity_allclose(g.stddev, 2 * u.mm)
# Try different units
g.amplitude = 2 * u.s
assert_quantity_allclose(g.amplitude, 2 * u.s)
g.mean = 2 * u.Jy
assert_quantity_allclose(g.mean, 2 * u.Jy)
def test_parameter_lose_units():
"""
    Check that a parameter that has been set to a quantity raises an exception
    when subsequently set to a value with no units. We do this because setting
    a parameter to a value with no units is ambiguous if units were set before:
    if a parameter is 1 * u.Jy and the parameter is then set to 2, does this
    mean 2 without units, or 2 * u.Jy?
"""
g = Gaussian1D(1 * u.Jy, 3, 0.1)
with pytest.raises(UnitsError) as exc:
g.amplitude = 2
assert exc.value.args[0] == ("The 'amplitude' parameter should be given as "
"a Quantity because it was originally "
"initialized as a Quantity")
def test_parameter_add_units():
"""
On the other hand, if starting from a parameter with no units, we should be
able to add units since this is unambiguous.
"""
g = Gaussian1D(1, 3, 0.1)
g.amplitude = 2 * u.Jy
assert_quantity_allclose(g.amplitude, 2 * u.Jy)
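# Hedged sketch restating the asymmetry shown by the two tests above in one
# place: a unitless parameter may gain units, but a Quantity parameter may
# not silently lose them.
def _units_asymmetry_sketch():
    g = Gaussian1D(1, 3, 0.1)
    g.amplitude = 2 * u.Jy      # unitless -> Quantity: allowed
    with pytest.raises(UnitsError):
        g.amplitude = 4         # Quantity -> unitless: rejected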
def test_parameter_change_unit():
"""
Test that changing the unit on a parameter does not work. This is an
ambiguous operation because it's not clear if it means that the value should
be converted or if the unit should be changed without conversion.
"""
g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)
# Setting a unit on a unitless parameter should not work
with pytest.raises(ValueError) as exc:
g.amplitude.unit = u.Jy
assert exc.value.args[0] == ("Cannot attach units to parameters that were "
"not initially specified with units")
    # Changing the unit of a parameter that already has one should also not
    # work, even if the new unit is equivalent
with pytest.raises(ValueError) as exc:
g.mean.unit = u.cm
assert exc.value.args[0] == ("Cannot change the unit attribute directly, "
"instead change the parameter to a new quantity")
def test_parameter_set_value():
"""
Test that changing the value on a parameter works as expected.
"""
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
# To set a parameter to a quantity, we simply do
g.amplitude = 2 * u.Jy
# If we try setting the value, we need to pass a non-quantity value
# TODO: determine whether this is the desired behavior?
g.amplitude.value = 4
assert_quantity_allclose(g.amplitude, 4 * u.Jy)
assert g.amplitude.value == 4
assert g.amplitude.unit is u.Jy
# If we try setting it to a Quantity, we raise an error
with pytest.raises(TypeError) as exc:
g.amplitude.value = 3 * u.Jy
assert exc.value.args[0] == ("The .value property on parameters should be set to "
"unitless values, not Quantity objects. To set a "
"parameter to a quantity simply set the parameter "
"directly without using .value")
def test_parameter_quantity_property():
"""
Test that the quantity property of Parameters behaves as expected
"""
    # Since parameters have .value and .unit attributes that return just the
    # value and unit respectively, there is also a .quantity attribute that
    # returns a Quantity instance.
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy)
    # Setting a parameter to a quantity changes both the value and the unit
g.amplitude.quantity = 5 * u.mJy
assert g.amplitude.value == 5
assert g.amplitude.unit is u.mJy
# And we can also set the parameter to a value with different units
g.amplitude.quantity = 4 * u.s
assert g.amplitude.value == 4
assert g.amplitude.unit is u.s
# But not to a value without units
with pytest.raises(TypeError) as exc:
g.amplitude.quantity = 3
assert exc.value.args[0] == "The .quantity attribute should be set to a Quantity object"
def test_parameter_default_units_match():
# If the unit and default quantity units are different, raise an error
with pytest.raises(ParameterDefinitionError) as exc:
class TestC(Fittable1DModel):
a = Parameter(default=1.0 * u.m, unit=u.Jy)
assert exc.value.args[0] == ("parameter default 1.0 m does not have units "
"equivalent to the required unit Jy")
@pytest.mark.parametrize(('unit', 'default'), ((u.m, 1.0), (None, 1 * u.m)))
def test_parameter_defaults(unit, default):
"""
Test that default quantities are correctly taken into account
"""
class TestModel(BaseTestModel):
a = Parameter(default=default, unit=unit)
# TODO: decide whether the default property should return a value or
# a quantity?
# The default unit and value should be set on the class
assert TestModel.a.unit == u.m
assert TestModel.a.default == 1.0
# Check that the default unit and value are also set on a class instance
m = TestModel()
assert m.a.unit == u.m
assert m.a.default == m.a.value == 1.0
# If the parameter is set to a different value, the default is still the
# internal default
m = TestModel(2.0 * u.m)
assert m.a.unit == u.m
assert m.a.value == 2.0
assert m.a.default == 1.0
# Instantiate with a different, but compatible unit
m = TestModel(2.0 * u.pc)
assert m.a.unit == u.pc
assert m.a.value == 2.0
# The default is still in the original units
# TODO: but how do we know what those units are if we don't return a
# quantity?
assert m.a.default == 1.0
# Initialize with a completely different unit
m = TestModel(2.0 * u.Jy)
assert m.a.unit == u.Jy
assert m.a.value == 2.0
# TODO: this illustrates why the default doesn't make sense anymore
assert m.a.default == 1.0
    # Instantiating without any units, however, raises an error because the
    # parameter was defined to require units
with pytest.raises(InputParameterError) as exc:
TestModel(1.0)
assert exc.value.args[0] == ("TestModel.__init__() requires a "
"Quantity for parameter 'a'")
def test_parameter_quantity_arithmetic():
"""
Test that arithmetic operations with properties that have units return the
appropriate Quantities.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Addition should work if units are compatible
assert g.mean + (1 * u.m) == 2 * u.m
assert (1 * u.m) + g.mean == 2 * u.m
# Multiplication by a scalar should also preserve the quantity-ness
assert g.mean * 2 == (2 * u.m)
assert 2 * g.mean == (2 * u.m)
# Multiplication by a quantity should result in units being multiplied
assert g.mean * (2 * u.m) == (2 * (u.m ** 2))
assert (2 * u.m) * g.mean == (2 * (u.m ** 2))
# Negation should work properly too
assert -g.mean == (-1 * u.m)
assert abs(-g.mean) == g.mean
# However, addition of a quantity + scalar should not work
with pytest.raises(UnitsError) as exc:
g.mean + 1
assert exc.value.args[0] == ("Can only apply 'add' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
1 + g.mean
assert exc.value.args[0] == ("Can only apply 'add' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
def test_parameter_quantity_comparison():
"""
Basic test of comparison operations on properties with units.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Essentially here we are checking that parameters behave like Quantity
assert g.mean == 1 * u.m
assert 1 * u.m == g.mean
assert g.mean != 1
assert 1 != g.mean
assert g.mean < 2 * u.m
assert 2 * u.m > g.mean
with pytest.raises(UnitsError) as exc:
g.mean < 2
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
2 > g.mean
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)
assert np.all(g.mean == [1, 2] * u.m)
assert np.all([1, 2] * u.m == g.mean)
assert np.all(g.mean != [1, 2])
assert np.all([1, 2] != g.mean)
with pytest.raises(UnitsError) as exc:
g.mean < [3, 4]
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
[3, 4] > g.mean
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
def test_parameters_compound_models():
tan = Pix2Sky_TAN()
sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg)
lon_pole = 180 * u.deg
n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole)
rot = Rotation2D(23)
m = rot | n2c
|
1fea0bace251ba13ee32f4cb3c119de35ab78c4f24c8dab05c2d625976811bad | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for polynomial models."""
import os
from itertools import product
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.modeling import fitting
from astropy import wcs
from astropy.io import fits
from astropy.modeling.polynomial import (Chebyshev1D, Hermite1D, Legendre1D, Polynomial1D,
Chebyshev2D, Hermite2D, Legendre2D, Polynomial2D, SIP,
PolynomialBase, OrthoPolynomialBase)
from astropy.modeling.functional_models import Linear1D
from astropy.modeling.mappings import Identity
from astropy.utils.data import get_pkg_data_filename
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
linear1d = {
Chebyshev1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': 1.2}}
},
Hermite1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': 1.2}}
},
Legendre1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': 1.2}}
},
Polynomial1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': 1.2}}
},
Linear1D: {
'args': (),
'kwargs': {},
'parameters': {'intercept': 1.2, 'slope': 23.1},
'constraints': {'fixed': {'intercept': 1.2}}
}
}
linear2d = {
Chebyshev2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': 1.2}}
},
Hermite2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': 1.2}}
},
Legendre2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': 1.2}}
},
Polynomial2D: {
'args': (1,),
'kwargs': {},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3},
'constraints': {'fixed': {'c0_0': 1.2}}
}
}
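# Illustrative helper (a sketch, not used by the tests): this is the model
# construction pattern that each parametrized fitting test below repeats,
# factored out here for clarity.
def _build_from_spec(model_class, spec, constraints=False):
    kwargs = dict(spec['kwargs'], **spec['parameters'])
    if constraints:
        kwargs.update(spec['constraints'])
    return model_class(*spec['args'], **kwargs)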
@pytest.mark.skipif('not HAS_SCIPY')
class TestFitting:
"""Test linear fitter with polynomial models."""
def setup_class(self):
self.N = 100
self.M = 100
self.x1 = np.linspace(1, 10, 100)
self.y2, self.x2 = np.mgrid[:100, :83]
rsn = np.random.RandomState(0)
self.n1 = rsn.randn(self.x1.size) * .1
self.n2 = rsn.randn(self.x2.size)
self.n2.shape = self.x2.shape
self.linear_fitter = fitting.LinearLSQFitter()
self.non_linear_fitter = fitting.LevMarLSQFitter()
# TODO: Most of these test cases have some pretty repetitive setup that we
# could probably factor out
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
# For the constraints tests we're not checking the overall fit,
# just that the constraint was maintained
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_non_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
model_nlin = self.non_linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_non_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
model_nlin = self.non_linear_fitter(model, self.x2, self.y2,
z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize('model_class',
                         [cls for cls in list(linear1d) + list(linear2d)
                          if issubclass(cls, PolynomialBase)])
def test_polynomial_init_with_constraints(model_class):
"""
Test that polynomial models can be instantiated with constraints, but no
parameters specified.
Regression test for https://github.com/astropy/astropy/issues/3606
"""
# Just determine which parameter to place a constraint on; it doesn't
# matter which parameter it is to exhibit the problem so long as it's a
# valid parameter for the model
if '1D' in model_class.__name__:
param = 'c0'
else:
param = 'c0_0'
if issubclass(model_class, OrthoPolynomialBase):
degree = (2, 2)
else:
degree = (2,)
m = model_class(*degree, fixed={param: True})
assert m.fixed[param] is True
assert getattr(m, param).fixed is True
def test_sip_hst():
"""Test SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'hst_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)
coords = [1, 1]
rel_coords = [1 - crpix1, 1 - crpix2]
astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
assert_allclose(sip(1, 1), astwcs_result)
def test_sip_irac():
"""Test forward and inverse SIP againts astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'irac_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
ap_pars = dict(**hdr['AP_*'])
bp_pars = dict(**hdr['BP_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
ap_order = ap_pars.pop('AP_ORDER')
bp_order = bp_pars.pop('BP_ORDER')
del a_pars['A_DMAX']
del b_pars['B_DMAX']
pix = [200, 200]
rel_pix = [200 - crpix1, 200 - crpix2]
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars,
ap_order=ap_order, ap_coeff=ap_pars, bp_order=bp_order,
bp_coeff=bp_pars)
foc = wobj.sip_pix2foc([pix], 1)
newpix = wobj.sip_foc2pix(foc, 1)[0]
assert_allclose(sip(*pix), foc[0] - rel_pix)
assert_allclose(sip.inverse(*foc[0]) +
foc[0] - rel_pix, newpix - pix)
def test_sip_no_coeff():
sip = SIP([10, 12], 2, 2)
assert_allclose(sip.sip1d_a.parameters, [0., 0., 0])
assert_allclose(sip.sip1d_b.parameters, [0., 0., 0])
with pytest.raises(NotImplementedError):
sip.inverse
@pytest.mark.parametrize('cls', (Polynomial1D, Chebyshev1D, Legendre1D,
Polynomial2D, Chebyshev2D, Legendre2D))
def test_zero_degree_polynomial(cls):
"""
A few tests that degree=0 polynomials are correctly evaluated and
fitted.
Regression test for https://github.com/astropy/astropy/pull/3589
"""
if cls.n_inputs == 1: # Test 1D polynomials
p1 = cls(degree=0, c0=1)
assert p1(0) == 1
assert np.all(p1(np.zeros(5)) == np.ones(5))
x = np.linspace(0, 1, 100)
# Add a little noise along a straight line
y = 1 + np.random.uniform(0, 0.1, len(x))
p1_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p1_fit = fitter(p1_init, x, y)
        # The fit won't be exact, of course, but the fitted constant should
        # be within the 0.1 absolute tolerance used below
assert_allclose(p1_fit.c0, 1, atol=0.10)
elif cls.n_inputs == 2: # Test 2D polynomials
if issubclass(cls, OrthoPolynomialBase):
p2 = cls(x_degree=0, y_degree=0, c0_0=1)
else:
p2 = cls(degree=0, c0_0=1)
assert p2(0, 0) == 1
assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))
y, x = np.mgrid[0:1:100j, 0:1:100j]
z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)
if issubclass(cls, OrthoPolynomialBase):
p2_init = cls(x_degree=0, y_degree=0)
else:
p2_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p2_fit = fitter(p2_init, x, y, z)
assert_allclose(p2_fit.c0_0, 1, atol=0.10)
@pytest.mark.skipif('not HAS_SCIPY')
def test_2d_orthopolynomial_in_compound_model():
"""
    Ensure that OrthoPolynomialBase (i.e. Chebyshev2D & Legendre2D) models get
evaluated & fitted correctly when part of a compound model.
Regression test for https://github.com/astropy/astropy/pull/6085.
"""
y, x = np.mgrid[0:5, 0:5]
z = x + y
fitter = fitting.LevMarLSQFitter()
simple_model = Chebyshev2D(2, 2)
simple_fit = fitter(simple_model, x, y, z)
fitter = fitting.LevMarLSQFitter() # re-init to compare like with like
compound_model = Identity(2) | Chebyshev2D(2, 2)
compound_fit = fitter(compound_model, x, y, z)
assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-15)
|
8359d493a9b5945a419a249d640cfa9a15d9e502c0c49e6c98ef01e09e19f0e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
from copy import deepcopy
import pickle
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.utils import minversion
from astropy.modeling.core import Model, ModelDefinitionError
from astropy.modeling.parameters import Parameter
from astropy.modeling.models import (Const1D, Shift, Scale, Rotation2D, Gaussian1D,
Gaussian2D, Polynomial1D, Polynomial2D,
Chebyshev2D, Legendre2D, Chebyshev1D, Legendre1D,
AffineTransformation2D, Identity, Mapping,
Tabular1D)
try:
import scipy
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
HAS_SCIPY_14 = HAS_SCIPY and minversion(scipy, "0.14")
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_class_arithmetic_1d(expr, result):
# Const1D is perhaps the simplest model to test basic arithmetic with.
# TODO: Should define more tests later on for more complicated
# combinations of models
S = expr(Const1D, Const1D)
assert issubclass(S, Model)
assert S.n_inputs == 1
assert S.n_outputs == 1
# Initialize an instance of the model, providing values for the two
# "amplitude" parameters
s = S(2, 3)
# It shouldn't matter what input we evaluate on since this is a constant
# function
out = s(0)
assert out == result
assert isinstance(out, float)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set(expr, result):
s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2))
out = s(0, model_set_axis=False)
assert_array_equal(out, result)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set_raises_value_error(expr, result):
"""Check that creating model sets with components whose _n_models are
different raise a value error
"""
with pytest.raises(ValueError):
s = expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1))
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_instance_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters.
"""
s = expr(Const1D(2), Const1D(3))
assert isinstance(s, Model)
assert s.n_inputs == 1
assert s.n_outputs == 1
out = s(0)
assert out == result
assert isinstance(out, float)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_mixed_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from an
expression of one model class with one model instance (and vice-versa).
"""
S1 = expr(Const1D, Const1D(3))
S2 = expr(Const1D(2), Const1D)
for cls in (S1, S2):
assert issubclass(cls, Model)
assert cls.n_inputs == 1
assert cls.n_outputs == 1
    # Requires values for both amplitudes even though one of them has a
    # default
# TODO: We may wish to fix that eventually, so that if a parameter has a
# default it doesn't *have* to be given in the init
s1 = S1(2, 3)
s2 = S2(2, 3)
for out in (s1(0), s2(0)):
assert out == result
assert isinstance(out, float)
def test_simple_two_model_class_compose_1d():
"""
Shift and Scale are two of the simplest models to test model composition
with.
"""
S1 = Shift | Scale # First shift then scale
assert issubclass(S1, Model)
assert S1.n_inputs == 1
assert S1.n_outputs == 1
s1 = S1(2, 3) # Shift by 2 and scale by 3
assert s1(1) == 9.0
S2 = Scale | Shift # First scale then shift
assert issubclass(S2, Model)
assert S2.n_inputs == 1
assert S2.n_outputs == 1
s2 = S2(2, 3) # Scale by 2 then shift by 3
assert s2(1) == 5.0
# Test with array inputs
assert_array_equal(s2([1, 2, 3]), [5.0, 7.0, 9.0])
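# Sketch of the composition rule exercised above: ``A | B`` feeds A's output
# into B, so (Shift | Scale)(2, 3) computes 3 * (x + 2).
def _compose_order_sketch():
    s = (Shift | Scale)(2, 3)
    assert s(1.0) == 3 * (1.0 + 2)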
def test_simple_two_model_class_compose_2d():
"""
A simple example consisting of two rotations.
"""
R = Rotation2D | Rotation2D
assert issubclass(R, Model)
assert R.n_inputs == 2
assert R.n_outputs == 2
r1 = R(45, 45) # Rotate twice by 45 degrees
assert_allclose(r1(0, 1), (-1, 0), atol=1e-10)
r2 = R(90, 90) # Rotate twice by 90 degrees
assert_allclose(r2(0, 1), (0, -1), atol=1e-10)
# Compose R with itself to produce 4 rotations
R2 = R | R
r3 = R2(45, 45, 45, 45)
assert_allclose(r3(0, 1), (0, -1), atol=1e-10)
def test_n_submodels():
"""
Test that CompoundModel.n_submodels properly returns the number
of components.
"""
g2 = Gaussian1D() + Gaussian1D()
assert g2.n_submodels() == 2
g3 = g2 + Gaussian1D()
assert g3.n_submodels() == 3
g5 = g3 | g2
assert g5.n_submodels() == 5
g7 = g5 / g2
assert g7.n_submodels() == 7
# make sure it works as class method
p = Polynomial1D + Polynomial1D
assert p.n_submodels() == 2
def test_expression_formatting():
"""
Test that the expression strings from compound models are formatted
correctly.
"""
    # For the purposes of this test it doesn't matter a great deal which
    # model(s) are used in the expression
G = Gaussian1D
G2 = Gaussian2D
M = G + G
assert M._format_expression() == '[0] + [1]'
M = G + G + G
assert M._format_expression() == '[0] + [1] + [2]'
M = G + G * G
assert M._format_expression() == '[0] + [1] * [2]'
M = G * G + G
assert M._format_expression() == '[0] * [1] + [2]'
M = G + G * G + G
assert M._format_expression() == '[0] + [1] * [2] + [3]'
M = (G + G) * (G + G)
assert M._format_expression() == '([0] + [1]) * ([2] + [3])'
# This example uses parentheses in the expression, but those won't be
# preserved in the expression formatting since they technically aren't
# necessary, and there's no way to know that they were originally
# parenthesized (short of some deep, and probably not worthwhile
# introspection)
M = (G * G) + (G * G)
assert M._format_expression() == '[0] * [1] + [2] * [3]'
M = G ** G
assert M._format_expression() == '[0] ** [1]'
M = G + G ** G
assert M._format_expression() == '[0] + [1] ** [2]'
M = (G + G) ** G
assert M._format_expression() == '([0] + [1]) ** [2]'
M = G + G | G
assert M._format_expression() == '[0] + [1] | [2]'
M = G + (G | G)
assert M._format_expression() == '[0] + ([1] | [2])'
M = G & G | G2
assert M._format_expression() == '[0] & [1] | [2]'
M = G & (G | G)
assert M._format_expression() == '[0] & ([1] | [2])'
def test_indexing_on_class():
"""
Test indexing on compound model class objects, including cases where the
submodels are classes, as well as instances, or both.
"""
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
M = Gaussian1D + Const1D
assert M[0] is Gaussian1D
assert M[1] is Const1D
assert M['Gaussian1D'] is M[0]
assert M['Const1D'] is M[1]
M = Gaussian1D + p
assert M[0] is Gaussian1D
assert isinstance(M['p'], Polynomial1D)
m = g + p
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Polynomial1D)
assert isinstance(m['g'], Gaussian1D)
assert isinstance(m['p'], Polynomial1D)
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
# TODO: It would be good if there were an easier way to interrogate a compound
# model class for what expression it represents. Not sure what that would look
# like though.
def test_slicing_on_class():
"""
    Test slicing a simple compound model class using integers and names.
"""
A = Const1D.rename('A')
B = Const1D.rename('B')
C = Const1D.rename('C')
D = Const1D.rename('D')
E = Const1D.rename('E')
F = Const1D.rename('F')
M = A + B - C * D / E ** F
assert M[0:1] is A
# This test will also check that the correct parameter names are generated
# for each slice (fairly trivial in this case since all the submodels have
# the same parameter, but if any corner cases are found that aren't covered
# by this test we can do something different...)
assert M[0:1].param_names == ('amplitude',)
# This looks goofy but if you slice by name to the sub-model of the same
# name it should just return that model, logically.
assert M['A':'A'] is A
assert M['A':'A'].param_names == ('amplitude',)
assert M[5:6] is F
assert M[5:6].param_names == ('amplitude',)
assert M['F':'F'] is F
assert M['F':'F'].param_names == ('amplitude',)
# 1 + 2
assert M[:2](1, 2)(0) == 3
assert M[:2].param_names == ('amplitude_0', 'amplitude_1')
assert M[:'B'](1, 2)(0) == 3
assert M[:'B'].param_names == ('amplitude_0', 'amplitude_1')
# 2 - 3
assert M[1:3](2, 3)(0) == -1
assert M[1:3].param_names == ('amplitude_1', 'amplitude_2')
assert M['B':'C'](2, 3)(0) == -1
assert M['B':'C'].param_names == ('amplitude_1', 'amplitude_2')
# 3 * 4
assert M[2:4](3, 4)(0) == 12
assert M[2:4].param_names == ('amplitude_2', 'amplitude_3')
assert M['C':'D'](3, 4)(0) == 12
assert M['C':'D'].param_names == ('amplitude_2', 'amplitude_3')
# 4 / 5
assert M[3:5](4, 5)(0) == 0.8
assert M[3:5].param_names == ('amplitude_3', 'amplitude_4')
assert M['D':'E'](4, 5)(0) == 0.8
assert M['D':'E'].param_names == ('amplitude_3', 'amplitude_4')
# 5 ** 6
assert M[4:6](5, 6)(0) == 15625
assert M[4:6].param_names == ('amplitude_4', 'amplitude_5')
assert M['E':'F'](5, 6)(0) == 15625
assert M['E':'F'].param_names == ('amplitude_4', 'amplitude_5')
def test_slicing_on_instance():
"""
    Test slicing a simple compound model instance using integers and names.
"""
A = Const1D.rename('A')
B = Const1D.rename('B')
C = Const1D.rename('C')
D = Const1D.rename('D')
E = Const1D.rename('E')
F = Const1D.rename('F')
M = A + B - C * D / E ** F
m = M(1, 2, 3, 4, 5, 6)
assert isinstance(m[0:1], A)
assert isinstance(m['A':'A'], A)
assert isinstance(m[5:6], F)
assert isinstance(m['F':'F'], F)
# 1 + 2
assert m[:'B'](0) == 3
assert m[:'B'].param_names == ('amplitude_0', 'amplitude_1')
assert np.all(m[:'B'].parameters == [1, 2])
# 2 - 3
assert m['B':'C'](0) == -1
assert m['B':'C'].param_names == ('amplitude_1', 'amplitude_2')
assert np.all(m['B':'C'].parameters == [2, 3])
# 3 * 4
assert m['C':'D'](0) == 12
assert m['C':'D'].param_names == ('amplitude_2', 'amplitude_3')
assert np.all(m['C':'D'].parameters == [3, 4])
# 4 / 5
assert m['D':'E'](0) == 0.8
assert m['D':'E'].param_names == ('amplitude_3', 'amplitude_4')
assert np.all(m['D':'E'].parameters == [4, 5])
# 5 ** 6
assert m['E':'F'](0) == 15625
assert m['E':'F'].param_names == ('amplitude_4', 'amplitude_5')
assert np.all(m['E':'F'].parameters == [5, 6])
def test_indexing_on_instance():
"""Test indexing on compound model instances."""
M = Gaussian1D + Const1D
m = M(1, 0, 0.1, 2)
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Const1D)
assert isinstance(m['Gaussian1D'], Gaussian1D)
assert isinstance(m['Const1D'], Const1D)
# Test parameter equivalence
assert m[0].amplitude == 1 == m.amplitude_0
assert m[0].mean == 0 == m.mean_0
assert m[0].stddev == 0.1 == m.stddev_0
assert m[1].amplitude == 2 == m.amplitude_1
# Test that parameter value updates are symmetric between the compound
# model and the submodel returned by indexing
const = m[1]
m.amplitude_1 = 42
assert const.amplitude == 42
const.amplitude = 137
assert m.amplitude_1 == 137
# Similar couple of tests, but now where the compound model was created
# from model instances
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
m = g + p
assert m[0].name == 'g'
assert m[1].name == 'p'
assert m['g'].name == 'g'
assert m['p'].name == 'p'
poly = m[1]
m.c0_1 = 12345
assert poly.c0 == 12345
poly.c1 = 6789
assert m.c1_1 == 6789
# Ensure this did *not* modify the original models we used as templates
assert p.c0 == 0
assert p.c1 == 0
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
def test_basic_compound_inverse():
"""
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only.
"""
t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
assert_allclose(t.inverse(*t(0, 1)), (0, 1))
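# Sketch of why the round trip above works: for ``a | b`` the inverse is
# ``b.inverse | a.inverse``, i.e. the reversed composition of the component
# inverses.
def _inverse_order_sketch():
    m = Shift(2) | Scale(3)
    assert_allclose(m.inverse(m(1.0)), 1.0)
    assert_allclose(m.inverse(9.0), (Scale(1 / 3) | Shift(-2))(9.0))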
@pytest.mark.parametrize('model', [
Shift(0) + Shift(0) | Shift(0),
Shift(0) - Shift(0) | Shift(0),
Shift(0) * Shift(0) | Shift(0),
Shift(0) / Shift(0) | Shift(0),
Shift(0) ** Shift(0) | Shift(0),
Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6)])
def test_compound_unsupported_inverse(model):
"""
Ensure inverses aren't supported in cases where it shouldn't be.
"""
with pytest.raises(NotImplementedError):
model.inverse
def test_mapping_basic_permutations():
"""
    Tests a couple of basic examples of the Mapping model--specifically
    examples that merely permute the outputs.
"""
x, y = Rotation2D(90)(1, 2)
RS = Rotation2D | Mapping((1, 0))
x_prime, y_prime = RS(90)(1, 2)
assert_allclose((x, y), (y_prime, x_prime))
# A more complicated permutation
M = Rotation2D & Scale
m = M(90, 2)
x, y, z = m(1, 2, 3)
MS = M | Mapping((2, 0, 1))
ms = MS(90, 2)
x_prime, y_prime, z_prime = ms(1, 2, 3)
assert_allclose((x, y, z), (y_prime, z_prime, x_prime))
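# Sketch: a Mapping simply reorders (or duplicates/drops) outputs, so the
# permutation used above can be checked in isolation.
def _mapping_swap_sketch():
    swap = Mapping((1, 0))
    assert swap(1.0, 2.0) == (2.0, 1.0)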
def test_mapping_inverse():
"""Tests inverting a compound model that includes a `Mapping`."""
RS = Rotation2D & Scale
# Rotates 2 of the coordinates and scales the third--then rotates on a
# different axis and scales on the axis of rotation. No physical meaning
# here just a simple test
M = RS | Mapping([2, 0, 1]) | RS
m = M(12.1, 13.2, 14.3, 15.4)
assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08)
def test_identity_input():
"""
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362
"""
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=90)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), [-3.0, 1.0])
# Same test case but using class composition
TestModel = ident1 & Shift | Rotation2D
model = TestModel(offset_1=1, angle_2=90)
assert_allclose(model(1, 2), [-3.0, 1.0])
def test_slicing_on_instances_2():
"""
More slicing tests.
Regression test for https://github.com/embray/astropy/pull/10
"""
model_a = Shift(1, name='a')
model_b = Shift(2, name='b')
model_c = Rotation2D(3, name='c')
model_d = Scale(2, name='d')
model_e = Scale(3, name='e')
m = (model_a & model_b) | model_c | (model_d & model_e)
with pytest.raises(ModelDefinitionError):
# The slice can't actually be taken since the resulting model cannot be
# evaluated
assert m[1:].submodel_names == ('b', 'c', 'd', 'e')
assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')
with pytest.raises(ModelDefinitionError):
assert m['c':'d'].submodel_names == ('c', 'd')
assert m[1:2].name == 'b'
assert m[2:7].submodel_names == ('c', 'd', 'e')
with pytest.raises(IndexError):
m['x']
with pytest.raises(IndexError):
m['a': 'r']
with pytest.raises(ModelDefinitionError):
assert m[-4:4].submodel_names == ('b', 'c', 'd')
with pytest.raises(ModelDefinitionError):
assert m[-4:-2].submodel_names == ('b', 'c')
def test_slicing_on_instances_3():
"""
    Like `test_slicing_on_instances_2`, but uses a compound model whose slices
    all produce valid models (test_slicing_on_instances_2 originally passed
    without raising any ModelDefinitionErrors, but that was before we
    prevented invalid models from being created).
"""
model_a = Shift(1, name='a')
model_b = Shift(2, name='b')
model_c = Gaussian1D(3, 0, 0.1, name='c')
model_d = Scale(2, name='d')
model_e = Scale(3, name='e')
m = (model_a + model_b) | model_c | (model_d + model_e)
assert m[1:].submodel_names == ('b', 'c', 'd', 'e')
assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['c':'d'].submodel_names == ('c', 'd')
assert m[1:2].name == 'b'
assert m[2:7].submodel_names == ('c', 'd', 'e')
with pytest.raises(IndexError):
m['x']
with pytest.raises(IndexError):
m['a': 'r']
assert m[-4:4].submodel_names == ('b', 'c', 'd')
assert m[-4:-2].submodel_names == ('b', 'c')
def test_slicing_on_instance_with_parameterless_model():
"""
Regression test to fix an issue where the indices attached to parameter
names on a compound model were not handled properly when one or more
submodels have no parameters. This was especially evident in slicing.
"""
p2 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
p1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
mapping = Mapping((0, 1, 0, 1))
offx = Shift(-2, name='x_translation')
offy = Shift(-1, name='y_translation')
aff = AffineTransformation2D(matrix=[[1, 2], [3, 4]], name='rotation')
model = mapping | (p1 & p2) | (offx & offy) | aff
assert model.param_names == ('c0_0_1', 'c1_0_1', 'c0_1_1',
'c0_0_2', 'c1_0_2', 'c0_1_2',
'offset_3', 'offset_4',
'matrix_5', 'translation_5')
assert model(1, 2) == (23.0, 53.0)
m = model[3:]
assert m.param_names == ('offset_3', 'offset_4', 'matrix_5',
'translation_5')
assert m(1, 2) == (1.0, 1.0)
def test_compound_model_with_nonstandard_broadcasting():
"""
Ensure that the ``standard_broadcasting`` flag is properly propagated when
creating compound models.
See the commit message for the commit in which this was added for more
details.
"""
offx = Shift(1)
offy = Shift(2)
rot = AffineTransformation2D([[0, -1], [1, 0]])
m = (offx & offy) | rot
x, y = m(0, 0)
assert x == -2
assert y == 1
# make sure conversion back to scalars is working properly
assert isinstance(x, float)
assert isinstance(y, float)
x, y = m([0, 1, 2], [0, 1, 2])
assert np.all(x == [-2, -3, -4])
assert np.all(y == [1, 2, 3])
def test_compound_model_classify_attributes():
"""
Regression test for an issue raised here:
https://github.com/astropy/astropy/pull/3231#discussion_r22221123
The issue is that part of the `help` implementation calls a utility
function called `inspect.classify_class_attrs`, which was leading to an
infinite recursion.
This is a useful test in its own right just in that it tests that compound
models can be introspected in some useful way without crashing--this works
as sort of a test of its somewhat complicated internal state management.
This test does not check any of the results of
`~inspect.classify_class_attrs`, though it might be useful to at some
point.
"""
inspect.classify_class_attrs(Gaussian1D + Gaussian1D)
def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
with pytest.raises(ModelDefinitionError):
Rotation2D | Gaussian1D
with pytest.raises(ModelDefinitionError):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
with pytest.raises(ModelDefinitionError):
Rotation2D + Gaussian1D
with pytest.raises(ModelDefinitionError):
Rotation2D(90) + Gaussian1D(1, 0, 0.1)
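# Sketch contrasting with the failures above: the '&' join concatenates
# inputs/outputs instead of chaining or adding them, so it is a valid way to
# combine models of different dimensionality.
def _join_sketch():
    m = Rotation2D(90) & Gaussian1D(1, 0, 0.1)
    assert m.n_inputs == 3 and m.n_outputs == 3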
class _ConstraintsTestA(Model):
stddev = Parameter(default=0, min=0, max=0.3)
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(stddev, mean):
return stddev, mean
class _ConstraintsTestB(Model):
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(mean):
return mean
@pytest.mark.parametrize('model',
[Gaussian1D(bounds={'stddev': (0, 0.3)}, fixed={'mean': True}) +
Gaussian1D(fixed={'mean': True}),
(_ConstraintsTestA + _ConstraintsTestB)()])
def test_inherit_constraints(model):
"""
Various tests for copying of constraint values between compound models and
their members.
There are two versions of this test: One where a compound model is created
from two model instances, and another where a compound model is created
from two model classes that have default constraints set on some of their
parameters.
Regression test for https://github.com/astropy/astropy/issues/3481
"""
# We have to copy the model before modifying it, otherwise the test fails
# if it is run twice in a row, because the state of the model instance
# would be preserved from one run to the next.
model = deepcopy(model)
# Lots of assertions in this test as there are multiple interfaces to
# parameter constraints
assert 'stddev_0' in model.bounds
assert model.bounds['stddev_0'] == (0, 0.3)
assert model.stddev_0.bounds == (0, 0.3)
assert 'mean_0' in model.fixed
assert model.fixed['mean_0'] is True
assert model.mean_0.fixed is True
assert 'mean_1' in model.fixed
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
# Great, all the constraints were inherited properly
# Now what about if we update them through the sub-models?
model[0].stddev.bounds = (0, 0.4)
assert model.bounds['stddev_0'] == (0, 0.4)
assert model.stddev_0.bounds == (0, 0.4)
assert model[0].stddev.bounds == (0, 0.4)
assert model[0].bounds['stddev'] == (0, 0.4)
model[0].bounds['stddev'] = (0.1, 0.5)
assert model.bounds['stddev_0'] == (0.1, 0.5)
assert model.stddev_0.bounds == (0.1, 0.5)
assert model[0].stddev.bounds == (0.1, 0.5)
assert model[0].bounds['stddev'] == (0.1, 0.5)
model[1].mean.fixed = False
assert model.fixed['mean_1'] is False
assert model.mean_1.fixed is False
assert model[1].mean.fixed is False
assert model[1].fixed['mean'] is False
model[1].fixed['mean'] = True
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
assert model[1].mean.fixed is True
assert model[1].fixed['mean'] is True
def test_compound_custom_inverse():
"""
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542
"""
poly = Polynomial1D(1, c0=1, c1=2)
scale = Scale(1)
shift = Shift(1)
model1 = poly | scale
model1.inverse = poly
# model1 now has a custom inverse (the polynomial itself, ignoring the
# trivial scale factor)
model2 = shift | model1
assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
# Make sure an inverse is not allowed if the models were combined with the
# wrong operator, or if one of the models doesn't have an inverse defined
with pytest.raises(NotImplementedError):
(shift + model1).inverse
with pytest.raises(NotImplementedError):
(model1 & poly).inverse
@pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2),
Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)])
def test_compound_with_polynomials(poly):
"""
Tests that polynomials are scaled when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x, y = np.mgrid[:20, :37]
result_compound = model(x, y)
result = shift(poly(x, y))
assert_allclose(result, result_compound)
# has to be defined at module level since pickling doesn't work right (in
# general) for classes defined in functions
class _TestPickleModel(Gaussian1D + Gaussian1D):
pass
def test_pickle_compound():
"""
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
"""
# Test pickling a compound model class
GG = Gaussian1D + Gaussian1D
GG2 = pickle.loads(pickle.dumps(GG))
assert GG.param_names == GG2.param_names
assert GG.__name__ == GG2.__name__
# Test that it works, or at least evaluates successfully
assert GG()(0.12345) == GG2()(0.12345)
# Test pickling a compound model instance
g1 = Gaussian1D(1.0, 0.0, 0.1)
g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
m = g1 + g2
m2 = pickle.loads(pickle.dumps(m))
assert m.param_names == m2.param_names
assert m.__class__.__name__ == m2.__class__.__name__
assert np.all(m.parameters == m2.parameters)
assert np.all(m(0) == m2(0))
# Test pickling a concrete class
p = pickle.dumps(_TestPickleModel, protocol=0)
# Note: This is very dependent on the specific protocol, but the point of
# this test is that the "concrete" model is pickled in a very simple way
# that only specifies the module and class name, and is unpickled by
# re-importing the class from the module in which it was defined. This
# should still work for concrete subclasses of compound model classes that
# were dynamically generated through an expression
exp = b'castropy.modeling.tests.test_compound\n_TestPickleModel\np0\n.'
# When testing against the expected value we drop the memo length field
# at the end, which may differ between runs
assert p[:p.rfind(b'p')] == exp[:exp.rfind(b'p')]
assert pickle.loads(p) is _TestPickleModel
def test_update_parameters():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
    assert m(1) == 4
    offx.offset = 42
    assert m(1) == 4
    m.factor_1 = 100
    assert m(1) == 200
    m2 = m | offx
    assert m2(1) == 242
def test_name():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
scl.name = "scale"
assert m._submodel_names == ('None_0', 'None_1')
assert m.name is None
m.name = "M"
assert m.name == "M"
m1 = m.rename("M1")
assert m.name == "M"
assert m1.name == "M1"
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_in_compound():
"""
Issue #7411 - evaluate should not change the shape of the output.
"""
t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19],
bounds_error=False)
rot = Rotation2D(2)
p = Polynomial1D(1)
    x = np.arange(12).reshape((3, 4))
    # Create a compound model that does not execute Tabular.__call__
    # directly but goes through model.evaluate, and is followed by a
    # Rotation2D which checks the exact shapes.
model = p & t | rot
x1, y1 = model(x, x)
assert x1.ndim == 2
assert y1.ndim == 2
|
77e027cbad87c074079b07d74a669178eeb25cc86423e0a77d321a551888bc58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from inspect import signature
from numpy.testing import assert_allclose
from astropy.modeling.core import Model, custom_model
from astropy.modeling.parameters import Parameter
from astropy.modeling import models
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (str(m) ==
"Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5")
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
inputs = ()
outputs = ('y',)
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel():
with pytest.raises(TypeError):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ['self', 'args', 'meta', 'name', 'kwargs']
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies']
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ('a', 'b')
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies']
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ('a', 'b')
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == ['self', 'x', 'y', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies']
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ('a',)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'kwargs']
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies']
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))
with pytest.raises(NotImplementedError):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
inputs = ()
outputs = ('y',)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
    # test that an error is raised when the bounding box is larger than the
    # input array
    with pytest.raises(ValueError):
        model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, .2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13., y0=10., z0=8., a=4., b=3., c=2., amp=1.):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (c is None):
                    continue
                actual = model.render(out=im, coords=c)
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D.n_submodels() == 1
assert models.Gaussian2D.n_submodels() == 1
def test_compound_deepcopy():
    model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._submodels) != id(new_model._submodels)
assert id(model._submodels[0]) != id(new_model._submodels[0])
assert id(model._submodels[1]) != id(new_model._submodels[1])
assert id(model._submodels[2]) != id(new_model._submodels[2])
|
b784d83813471446c044a5292ef49afce573dfe89c0e67d27832abbe8d33c6e2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test separability of models.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.modeling import models
from astropy.modeling.models import Mapping
from astropy.modeling.separable import (_coord_matrix, is_separable, _cdot,
_cstack, _arith_oper, separability_matrix)
sh1 = models.Shift(1, name='shift1')
sh2 = models.Shift(2, name='sh2')
scl1 = models.Scale(1, name='scl1')
scl2 = models.Scale(2, name='scl2')
map1 = Mapping((0, 1, 0, 1), name='map1')
map2 = Mapping((0, 0, 1), name='map2')
map3 = Mapping((0, 0), name='map3')
rot = models.Rotation2D(2, name='rotation')
p2 = models.Polynomial2D(1, name='p2')
p22 = models.Polynomial2D(2, name='p22')
p1 = models.Polynomial1D(1, name='p1')
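# Each entry maps a label to (compound model, expected results), where the
# expected results are the outputs of is_separable and separability_matrix
# for that model, in that order.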
compound_models = {
'cm1': (map3 & sh1 | rot & sh1 | sh1 & sh2 & sh1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm2': (sh1 & sh2 | rot | map1 | p2 & p22,
(np.array([False, False]),
np.array([[True, True], [True, True]]))
),
'cm3': (map2 | rot & scl1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm4': (sh1 & sh2 | map2 | rot & scl1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm5': (map3 | sh1 & sh2 | scl1 & scl2,
(np.array([False, False]),
np.array([[True], [True]]))
),
'cm7': (map2 | p2 & sh1,
(np.array([False, True]),
np.array([[True, False], [False, True]]))
)
}
def test_coord_matrix():
c = _coord_matrix(p2, 'left', 2)
assert_allclose(np.array([[1, 1], [0, 0]]), c)
c = _coord_matrix(p2, 'right', 2)
assert_allclose(np.array([[0, 0], [1, 1]]), c)
c = _coord_matrix(p1, 'left', 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(p1, 'left', 1)
assert_allclose(np.array([[1]]), c)
c = _coord_matrix(sh1, 'left', 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(sh1, 'right', 2)
assert_allclose(np.array([[0], [1]]), c)
c = _coord_matrix(sh1, 'right', 3)
assert_allclose(np.array([[0], [0], [1]]), c)
c = _coord_matrix(map3, 'left', 2)
assert_allclose(np.array([[1], [1]]), c)
c = _coord_matrix(map3, 'left', 3)
assert_allclose(np.array([[1], [1], [0]]), c)
def test_cdot():
result = _cdot(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _cdot(rot, p2)
assert_allclose(result, np.array([[2, 2]]))
result = _cdot(rot, rot)
assert_allclose(result, np.array([[2, 2], [2, 2]]))
result = _cdot(Mapping((0, 0)), rot)
assert_allclose(result, np.array([[2], [2]]))
def test_cstack():
result = _cstack(sh1, scl1)
assert_allclose(result, np.array([[1, 0], [0, 1]]))
result = _cstack(sh1, rot)
assert_allclose(result,
np.array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
)
result = _cstack(rot, sh1)
assert_allclose(result,
np.array([[1, 1, 0],
[1, 1, 0],
[0, 0, 1]])
)
def test_arith_oper():
result = _arith_oper(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _arith_oper(rot, rot)
assert_allclose(result, np.array([[1, 1], [1, 1]]))
@pytest.mark.parametrize(('compound_model', 'result'), compound_models.values())
def test_separable(compound_model, result):
assert_allclose(is_separable(compound_model), result[0])
assert_allclose(separability_matrix(compound_model), result[1])
|
9fd76ca6be10b43f1ea77183035fae07f57347e60e600c4a73515f052d1d869b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import contextlib
import warnings
from astropy.tests.helper import catch_warnings
@contextlib.contextmanager
def ignore_non_integer_warning():
# We need to ignore this warning on Scipy < 0.14.
# When our minimum version of Scipy is bumped up, this can be
# removed.
with catch_warnings():
warnings.filterwarnings(
"always", "using a non-integer number instead of an integer "
"will result in an error in the future", DeprecationWarning)
yield
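# Typical usage sketch (illustrative; the constraint tests use this pattern
# around SLSQP fits):
#
#     with ignore_non_integer_warning():
#         fitter(model, x, y)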
|
8a844f9f18d275c6c4fcd9beaafdc2c509459007cfd77bdf2dfbc5de14827ae6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
import itertools
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_equal, assert_array_equal,
assert_almost_equal)
from . import irafutil
from astropy.modeling import models, fitting
from astropy.modeling.core import Model, FittableModel
from astropy.modeling.parameters import Parameter, InputParameterError
from astropy.utils.data import get_pkg_data_filename
def setter1(val):
return val
def setter2(val, model):
model.do_something(val)
return val * model.p
class SetterModel(FittableModel):
inputs = ('x', 'y')
outputs = ('z',)
xc = Parameter(default=1, setter=setter1)
yc = Parameter(default=1, setter=setter2)
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return ((x - xc)**2 + (y - yc)**2)
def do_something(self, v):
pass
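# Note on the setter machinery exercised by SetterModel: assigning to a
# parameter that declares a ``setter`` routes the raw value through that
# function before it is stored, so ``self.yc = yc`` stores ``yc * self.p``
# (via setter2) while ``self.xc = xc`` stores the value unchanged (via
# setter1).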
class TParModel(Model):
"""
    A toy model to test the parameters machinery.
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name='alpha', default=42)
@staticmethod
def evaluate(*args):
pass
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
m = MockModel()
p = m.alpha
assert p.name == 'alpha'
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = 'beta'
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
# TODO: shouldn't setting a max < min give an error?
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
m = MockModel()
par = m.alpha
num = 42.
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par ** val == num ** val
assert val ** par == val ** num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
class TestParameters:
def setup_class(self):
"""
        Unit tests for parameters.
        Read an IRAF database file created by onedspec.identify. Use the
        information to create a 1D Chebyshev model and perform the same fit.
        Also create a Gaussian model.
"""
test_file = get_pkg_data_filename('data/idcompspec.fits')
        with open(test_file) as f:
            lines = f.read()
        reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields['order'])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_slice(self):
"""
Tests updating the parameters attribute with a slice.
This is what fitters internally do.
"""
self.model.parameters[:] = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30., 40., 50., 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
parameter (in this case coeff) is updated.
"""
self.model.parameters = [0, 0., 0., 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0., 0., 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
self.model.parameters[0] = 10.
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
with pytest.raises(InputParameterError):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
parameter (in this case coeff) with a sequence of the wrong size.
"""
with pytest.raises(InputParameterError):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
with pytest.raises(InputParameterError):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test the fitter modifies model.parameters.
Uses an iraf example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
assert_allclose(new_model.parameters,
np.array(
[4826.1066602783685, 952.8943813407858,
12.641236013982386,
-1.7910672553339604,
0.90252884366711317]),
rtol=10 ** (-2))
    def test_polynomial1d(self):
d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
p1 = models.Polynomial1D(3, **d)
assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
0, 0, 0, 0, 0, 0])
assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
p2 = models.Polynomial2D(2, **kw)
assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5,
1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
assert_array_equal(sc1.factor.value, [3, 3])
def test_parameters_wrong_shape(self):
sh1 = models.Shift(2)
with pytest.raises(InputParameterError):
sh1.offset = [3, 3]
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, .1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7],
n_models=2)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
        assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[1., 10], [3.5, 5.2], [0.4, 0.7]]))
        assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to one single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
        assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[11., 10], [3.5, 5.2], [0.4, 0.7]]))
        assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])
assert_almost_equal(self.gmodel.mean.value, [9., 5.2])
class TestParameterInitialization:
"""
    This suite of tests checks most if not all cases of instantiating a model
    with parameters of different shapes/sizes and with different numbers of
    parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
with pytest.raises(InputParameterError):
# Not broadcastable
t = TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array([[10, 20], [30, 40], [50, 60]])
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]]])
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,
1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
with pytest.raises(InputParameterError):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(('p1', 'p2'), [
(1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5])])
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == ()
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[10, 20], [30, 40]],
[[1, 2], [3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
t2 = TParModel([[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]]])
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (3,)
assert t2.e.shape == (3,)
def test_two_model_mixed_dimension_array_parameters(self):
with pytest.raises(InputParameterError):
# Can't broadcast different array shapes
TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]], n_models=2)
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],
[[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
def test_two_model_2d_array_parameters(self):
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[[10, 20], [30, 40]],
[[50, 60], [70, 80]]],
[[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4, 5, 6, 7, 8])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
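        # After the rollaxis calls coeff has shape (2, 3, 2) and e has shape
        # (3, 2); the last axis of each now indexes the two model sets.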
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]]])
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 30, 70, 40, 80,
50, 90, 1, 3, 2, 4, 3, 5])
assert t.coeff.shape == (2, 3)
assert t.e.shape == (3,)
def test_wrong_number_of_params(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
with pytest.raises(InputParameterError):
m = TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError):
m = TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
with pytest.raises(InputParameterError):
t = TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
with pytest.raises(InputParameterError):
m = TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),
model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
    Tests that a model with 3 parameters that do not all mutually broadcast
    is rejected, regardless of the order in which the parameters are given.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError):
TestModel(*args)
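# Standalone check of the broadcasting claim above (illustrative, not part of
# the original suite): shapes (2, 3) and (2, 4) cannot be broadcast together,
# which is what makes every permutation fail.
def _demo_broadcast_conflict():
    try:
        np.broadcast(np.ones((2, 3)), np.ones((2, 4)))
    except ValueError:
        return True
    return False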
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(-1, 3, np.pi)
for x, y in pars:
model.x = x
model.y = y
assert_almost_equal(model(x, y), (x + 1)**2 + (y - np.pi * 3)**2)
|
038f4c343deb1f6a9f73363151c98fa6d6cf69284f3ae35c7cd6decbf76a0f40 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import cos, sin
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.modeling import models
from astropy.wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(0, 0), (40, -20.56), (21.5, 45.9)])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
def test_Rotation2D_quantity():
model = models.Rotation2D(angle=90*u.deg)
x, y = model(1*u.deg, 0*u.arcsec)
assert_quantity_allclose([x, y], [0, 1]*u.deg, atol=1e-10*u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_euler_angle_rotations():
x = (0, 0)
y = (90, 0)
z = (0, 90)
negx = (180, 0)
negy = (-90, 0)
# rotate y into minus z
model = models.EulerAngleRotation(0, 90, 0, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
# rotate z into minus x
model = models.EulerAngleRotation(0, 90, 0, 'zyz')
assert_allclose(model(*z), negx, atol=10**-12)
# rotate x into minus y
model = models.EulerAngleRotation(0, 90, 0, 'yzy')
assert_allclose(model(*x), negy, atol=10**-12)
euler_axes_order = ['zxz', 'zyz', 'yzy', 'yxy', 'xyx', 'xzx']
@pytest.mark.parametrize(('axes_order'), euler_axes_order)
def test_euler_angles(axes_order):
"""
Tests against all Euler sequences.
The rotation matrices definitions come from Wikipedia.
"""
phi = np.deg2rad(23.4)
theta = np.deg2rad(12.2)
psi = np.deg2rad(34)
c1 = cos(phi)
c2 = cos(theta)
c3 = cos(psi)
s1 = sin(phi)
s2 = sin(theta)
s3 = sin(psi)
matrices = {'zxz': np.array([[(c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1), (s1*s2)],
[(c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3), (-c1*s2)],
[(s2*s3), (c3*s2), (c2)]]),
'zyz': np.array([[(c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3), (c1*s2)],
[(c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3), (s1*s2)],
[(-c3*s2), (s2*s3), (c2)]]),
'yzy': np.array([[(c1*c2*c3 - s1*s3), (-c1*s2), (c3*s1+c1*c2*s3)],
[(c3*s2), (c2), (s2*s3)],
[(-c1*s3 - c2*c3*s1), (s1*s2), (c1*c3-c2*s1*s3)]]),
'yxy': np.array([[(c1*c3 - c2*s1*s3), (s1*s2), (c1*s3+c2*c3*s1)],
[(s2*s3), (c2), (-c3*s2)],
[(-c3*s1 - c1*c2*s3), (c1*s2), (c1*c2*c3 - s1*s3)]]),
'xyx': np.array([[(c2), (s2*s3), (c3*s2)],
[(s1*s2), (c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1)],
[(-c1*s2), (c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3)]]),
'xzx': np.array([[(c2), (-c3*s2), (s2*s3)],
[(c1*s2), (c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3)],
[(s1*s2), (c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3)]])
}
model = models.EulerAngleRotation(23.4, 12.2, 34, axes_order)
mat = model._create_matrix(phi, theta, psi, axes_order)
    assert_allclose(mat.T, matrices[axes_order])
|
0c3a5a58f750400bbe4e2cd40fa40367b0537ba2135760dbe91f5a99c55f5c03 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import types
import pytest
import numpy as np
from numpy.testing import assert_allclose
from numpy.random import RandomState
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.modeling import models
from astropy.modeling import fitting
from .utils import ignore_non_integer_warning
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
class TestNonLinearConstraints:
def setup_class(self):
self.g1 = models.Gaussian1D(10, 14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, 13, stddev=.4)
self.x = np.arange(10, 20, .1)
self.y1 = self.g1(self.x)
self.y2 = self.g2(self.x)
rsn = RandomState(1234567890)
self.n = rsn.randn(100)
self.ny1 = self.y1 + 2 * self.n
self.ny2 = self.y2 + 2 * self.n
@pytest.mark.skipif('not HAS_SCIPY')
def test_fixed_par(self):
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3,
fixed={'amplitude': True})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert model.amplitude.value == 10
@pytest.mark.skipif('not HAS_SCIPY')
def test_tied_par(self):
def tied(model):
mean = 50 * model.stddev
return mean
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3, tied={'mean': tied})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert_allclose(model.mean.value, 50 * model.stddev,
rtol=10 ** (-5))
@pytest.mark.skipif('not HAS_SCIPY')
def test_joint_fitter(self):
g1 = models.Gaussian1D(10, 14.9, stddev=.3)
g2 = models.Gaussian1D(10, 13, stddev=.4)
jf = fitting.JointFitter([g1, g2], {g1: ['amplitude'],
g2: ['amplitude']}, [9.8])
x = np.arange(10, 20, .1)
y1 = g1(x)
y2 = g2(x)
n = np.random.randn(100)
ny1 = y1 + 2 * n
ny2 = y2 + 2 * n
jf(x, ny1, x, ny2)
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def compmodel(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errf(p, x1, y1, x2, y2):
return np.ravel(
np.r_[compmodel(p[0], p[1:3], x1) - y1,
compmodel(p[0], p[3:], x2) - y2])
fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2))
assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5))
assert_allclose(g1.amplitude.value, g2.amplitude.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_no_constraints(self):
g1 = models.Gaussian1D(9.9, 14.5, stddev=.3)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errf(p, x, y):
return func(p, x) - y
p0 = [9.9, 14.5, 0.3]
y = g1(self.x)
n = np.random.randn(100)
ny = y + n
fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny))
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, ny)
assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3))
@pytest.mark.skipif('not HAS_SCIPY')
class TestBounds:
def setup_class(self):
A = -2.0
B = 0.5
self.x = np.linspace(-1.0, 1.0, 100)
self.y = A * self.x + B + np.random.normal(scale=0.1, size=100)
data = np.array([505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0,
426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0,
479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0,
610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0,
1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0,
1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0,
3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0,
1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0,
733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0,
504.0, 566.0, 789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0,
435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0, 443.0,
416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0, 428.0])
self.data = data.reshape(11, 11)
def test_bounds_lsq(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.LevMarLSQFitter()
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_slsqp(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.SLSQPLSQFitter()
with ignore_non_integer_warning():
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_gauss2d_lsq(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.LevMarLSQFitter()
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
def test_bounds_gauss2d_slsqp(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.SLSQPLSQFitter()
with ignore_non_integer_warning():
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
class TestLinearConstraints:
def setup_class(self):
self.p1 = models.Polynomial1D(4)
self.p1.c0 = 0
self.p1.c1 = 0
self.p1.window = [0., 9.]
self.x = np.arange(10)
self.y = self.p1(self.x)
rsn = RandomState(1234567890)
self.n = rsn.randn(10)
self.ny = self.y + self.n
def test(self):
self.p1.c0.fixed = True
self.p1.c1.fixed = True
pfit = fitting.LinearLSQFitter()
model = pfit(self.p1, self.x, self.y)
assert_allclose(self.y, model(self.x))
# Test constraints as parameter properties
def test_set_fixed_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.mean.fixed = True
assert gauss.fixed == {'amplitude': False, 'mean': True, 'stddev': False}
def test_set_fixed_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
assert gauss.mean.fixed is True
def test_set_tied_1():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.amplitude.tied = tie_amplitude
assert gauss.amplitude.tied is not False
assert isinstance(gauss.tied['amplitude'], types.FunctionType)
def test_set_tied_2():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
assert gauss.amplitude.tied
def test_unset_fixed():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
gauss.mean.fixed = False
assert gauss.fixed == {'amplitude': False, 'mean': False, 'stddev': False}
def test_unset_tied():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
gauss.amplitude.tied = False
assert gauss.tied == {'amplitude': False, 'mean': False, 'stddev': False}
def test_set_bounds_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, None)})
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_set_bounds_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.stddev.min = 0.
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_unset_bounds():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, 2)})
gauss.stddev.min = None
gauss.stddev.max = None
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (None, None)}
def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {'a': (None, None), 'b': (0, None)}
assert m.fixed == {'a': False, 'b': True}
# Make a model instance that overrides the default constraints and values
m = MyModel(3, 4, bounds={'a': (1, None), 'b': (2, None)},
fixed={'a': True, 'b': False})
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {'a': (1, None), 'b': (2, None)}
assert m.fixed == {'a': True, 'b': False}
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_fixed_and_bound_constraints():
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
m = models.Gaussian1D(amplitude=3, mean=4, stddev=1,
bounds={'mean': (4, 5)},
fixed={'amplitude': True})
x = np.linspace(0, 10, 10)
y = np.exp(-x ** 2 / 2)
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
fitted_2 = f(m, x, y)
# It doesn't matter anymore what the amplitude ends up as so long as the
# bounds constraint was still obeyed
    assert fitted_2.mean >= 4
    assert fitted_2.mean <= 5
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_bound_constraints_estimate_jacobian():
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
# This fit should be trivial so even without constraints on the bounds it
# should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
f2 = fitting.LevMarLSQFitter()
fitted_2 = f2(m2, x, y)
    assert np.allclose(fitted_2.a, 1.5)
    assert np.allclose(fitted_2.b, -3)
    # Check that the estimated Jacobian was computed (it doesn't matter what
    # the values are so long as they're not all zero).
assert np.any(f2.fit_info['fjac'] != 0)
# https://github.com/astropy/astropy/issues/6014
@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian2d_positive_stddev():
# This is 2D Gaussian with noise to be fitted, as provided by @ysBach
test = [
[-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9,
-30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29],
[-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14,
139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03],
[91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26,
7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41],
[33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94,
336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55],
[82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27,
242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74],
[113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8,
547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35],
[106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9,
781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36],
[183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78,
731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24],
[137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49,
814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19],
[35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0,
491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05],
[190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43,
188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31],
[-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38,
220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96],
[130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36,
105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9],
[-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82,
-33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1],
[109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22,
42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51],
[10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03,
23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79],
[46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08,
285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]]
g_init = models.Gaussian2D(x_mean=8, y_mean=8)
fitter = fitting.LevMarLSQFitter()
y, x = np.mgrid[:17, :17]
g_fit = fitter(g_init, x, y, test)
# Compare with @ysBach original result:
# - x_stddev was negative, so its abs value is used for comparison here.
# - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored.
assert_allclose([g_fit.amplitude.value, g_fit.y_stddev.value],
[984.7694929790363, 3.1840618351417307], rtol=1.5e-6)
assert_allclose(g_fit.x_mean.value, 7.198391516587464)
assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
# Issue #6403
@pytest.mark.skipif('not HAS_SCIPY')
def test_2d_model():
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
fitter = fitting.LevMarLSQFitter()
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
from astropy.utils import NumpyRNGContext
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
def test_prior_posterior():
model = models.Gaussian1D()
model.amplitude.prior = models.Polynomial1D(1, c0=1, c1=2)
assert isinstance(model.amplitude.prior, models.Polynomial1D)
assert model.amplitude.prior.c0 == 1
assert model.amplitude.prior.c1 == 2
assert isinstance(model._constraints['prior']['amplitude'], models.Polynomial1D)
model.amplitude.prior = None
assert model.amplitude.prior is None
assert model._constraints['prior']['amplitude'] is None
|
0e7e2c92d06cdb2b82f15feabed31dc024c19a95258cab17744a8adf50454f65 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Shift, Rotation2D, Gaussian1D, Identity, Mapping
from astropy.utils import NumpyRNGContext
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
def test_swap_axes():
x = np.zeros((2, 3))
y = np.ones((2, 3))
mapping = Mapping((1, 0))
assert(mapping(1, 2) == (2.0, 1.0))
assert(mapping.inverse(2, 1) == (1, 2))
assert_array_equal(mapping(x, y), (y, x))
assert_array_equal(mapping.inverse(y, x), (x, y))
def test_duplicate_axes():
mapping = Mapping((0, 1, 0, 1))
assert(mapping(1, 2) == (1.0, 2., 1., 2))
assert(mapping.inverse(1, 2, 1, 2) == (1, 2))
assert(mapping.inverse.n_inputs == 4)
assert(mapping.inverse.n_outputs == 2)
def test_drop_axes_1():
mapping = Mapping((0,), n_inputs=2)
assert(mapping(1, 2) == (1.))
def test_drop_axes_2():
mapping = Mapping((1, ))
assert(mapping(1, 2) == (2.))
with pytest.raises(NotImplementedError):
mapping.inverse
def test_drop_axes_3():
mapping = Mapping((1,), n_inputs=2)
assert(mapping.n_inputs == 2)
rotation = Rotation2D(60)
model = rotation | mapping
assert_allclose(model(1, 2), 1.86602540378)
def test_identity():
x = np.zeros((2, 3))
y = np.ones((2, 3))
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=60)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), (-2.098076211353316, 2.3660254037844393))
res_x, res_y = model(x, y)
assert_allclose((res_x, res_y),
(np.array([[-1.73205081, -1.73205081, -1.73205081],
[-1.73205081, -1.73205081, -1.73205081]]),
np.array([[1., 1., 1.],
[1., 1., 1.]])))
assert_allclose(model.inverse(res_x, res_y), (x, y), atol=1.e-10)
# https://github.com/astropy/astropy/pull/6018
@pytest.mark.skipif('not HAS_SCIPY')
def test_fittable_compound():
m = Identity(1) | Mapping((0, )) | Gaussian1D(1, 5, 4)
x = np.arange(10)
y_real = m(x)
dy = 0.005
with NumpyRNGContext(1234567):
n = np.random.normal(0., dy, x.shape)
y_noisy = y_real + n
pfit = LevMarLSQFitter()
new_model = pfit(m, x, y_noisy)
y_fit = new_model(x)
assert_allclose(y_fit, y_real, atol=dy)
|
b78a6cb95b15b24a5e2a80abeb55904be34ede4248279071de2d0e52707f82bb | # Various tests of models not related to evaluation, fitting, or parameters
import pytest
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.modeling.models import Mapping, Pix2Sky_TAN, Gaussian1D
from astropy.modeling import models
from astropy.modeling.core import _ModelMeta
def test_gaussian1d_bounding_box():
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
bbox = g.bounding_box
assert_quantity_allclose(bbox[0], 2.835 * u.m)
assert_quantity_allclose(bbox[1], 3.165 * u.m)
def test_gaussian1d_n_models():
g = Gaussian1D(
amplitude=[1 * u.J, 2. * u.J],
mean=[1 * u.m, 5000 * u.AA],
stddev=[0.1 * u.m, 100 * u.AA],
n_models=2)
assert_quantity_allclose(g(1.01 * u.m), [0.99501248, 0.] * u.J)
assert_quantity_allclose(
g(u.Quantity([1.01 * u.m, 5010 * u.AA])), [0.99501248, 1.990025] * u.J)
# FIXME: The following doesn't work as np.asanyarray doesn't work with a
# list of quantity objects.
# assert_quantity_allclose(g([1.01 * u.m, 5010 * u.AA]),
# [ 0.99501248, 1.990025] * u.J)
"""
Test the "rules" of model units.
"""
def test_quantity_call():
"""
    Test that a model constructed with Quantities must be called with
    Quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g(10 * u.m)
with pytest.raises(u.UnitsError):
g(10)
def test_no_quantity_call():
"""
    Test that a model not constructed with Quantities can be called
    without Quantities.
"""
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert isinstance(g, Gaussian1D)
g(10)
def test_default_parameters():
# Test that calling with a quantity works when one of the parameters
# defaults to dimensionless
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm)
assert isinstance(g, Gaussian1D)
g(10*u.m)
def test_uses_quantity():
"""
    Test that ``uses_quantity`` reflects whether parameters are Quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
assert g.uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert not g.uses_quantity
g.mean = 3 * u.m
assert g.uses_quantity
def test_uses_quantity_compound():
"""
    Test that ``uses_quantity`` reflects whether any submodel of a compound
    model uses Quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g2 = Gaussian1D(mean=5 * u.m, stddev=5 * u.cm, amplitude=5 * u.Jy)
assert (g | g2).uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
g2 = Gaussian1D(mean=5, stddev=5, amplitude=5)
comp = g | g2
assert not (comp).uses_quantity
def test_uses_quantity_no_param():
comp = Mapping((0, 1)) | Pix2Sky_TAN()
assert comp.uses_quantity
def _allmodels():
allmodels = []
for name in dir(models):
model = getattr(models, name)
if type(model) is _ModelMeta:
try:
m = model()
            except Exception:
                continue
            allmodels.append(m)
return allmodels
@pytest.mark.parametrize("m", _allmodels())
def test_read_only(m):
"""
    Test that the following attributes are read-only:
    input_units
    return_units
    input_units_allow_dimensionless
    input_units_strict
"""
with pytest.raises(AttributeError):
m.input_units = {}
with pytest.raises(AttributeError):
m.return_units = {}
with pytest.raises(AttributeError):
m.input_units_allow_dimensionless = {}
with pytest.raises(AttributeError):
m.input_units_strict = {}
|
3cb3ebba204ec06a2c387b2264fde63617fe3e6700d1eafefb5217e2a147ab5c | from collections import OrderedDict
import pytest
import numpy as np
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.modeling.functional_models import (Gaussian1D,
Sersic1D, Sine1D, Linear1D,
Lorentz1D, Voigt1D, Const1D,
Box1D, Trapezoid1D, MexicanHat1D,
Moffat1D, Gaussian2D, Const2D, Ellipse2D,
Disk2D, Ring2D, Box2D, TrapezoidDisk2D,
MexicanHat2D, AiryDisk2D, Moffat2D, Sersic2D)
from astropy.modeling.powerlaws import (PowerLaw1D, BrokenPowerLaw1D, SmoothlyBrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D, LogParabola1D)
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
try:
    from scipy import optimize  # noqa: F401
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
FUNC_MODELS_1D = [
{'class': Gaussian1D,
'parameters': {'amplitude': 3 * u.Jy, 'mean': 2 * u.m, 'stddev': 30 * u.cm},
'evaluation': [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
'bounding_box': [0.35, 3.65] * u.m},
{'class': Sersic1D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'r_eff': 2 * u.arcsec, 'n': 4},
'evaluation': [(3 * u.arcsec, 1.3237148119468918 * u.MJy/u.sr)],
'bounding_box': False},
{'class': Sine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False},
{'class': Linear1D,
'parameters': {'slope': 3 * u.km / u.s, 'intercept': 5000 * u.m},
'evaluation': [(6000 * u.ms, 23 * u.km)],
'bounding_box': False},
{'class': Lorentz1D,
'parameters': {'amplitude': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm': 100 * u.AA},
'evaluation': [(0.51 * u.micron, 1 * u.Jy)],
'bounding_box': [255, 755] * u.nm},
{'class': Voigt1D,
'parameters': {'amplitude_L': 2 * u.Jy, 'x_0': 505 * u.nm,
'fwhm_L': 100 * u.AA, 'fwhm_G': 50 * u.AA},
'evaluation': [(0.51 * u.micron, 1.06264568 * u.Jy)],
'bounding_box': False},
{'class': Const1D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 3 * u.Jy)],
'bounding_box': False},
{'class': Box1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.9, 4.9] * u.um},
{'class': Trapezoid1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um, 'slope': 5 * u.Jy / u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.3, 5.5] * u.um},
{'class': MexicanHat1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'sigma': 1e-3 * u.mm},
'evaluation': [(1000 * u.nm, -0.09785050 * u.Jy)],
'bounding_box': [-5.6, 14.4] * u.um},
{'class': Moffat1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 0.238853503 * u.Jy)],
'bounding_box': False},
]
FUNC_MODELS_2D = [
{'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m},
{'class': Const2D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
'bounding_box': False},
{'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m},
{'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m},
{'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
{'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
{'class': Box2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.s,
'x_width': 4 * u.cm, 'y_width': 3 * u.s},
'evaluation': [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
'bounding_box': [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]]},
{'class': MexicanHat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False},
{'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False},
{'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False},
{'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False},
]
POWERLAW_MODELS = [
{'class': PowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1},
'evaluation': [(1 * u.m, 500 * u.g)],
'bounding_box': False},
{'class': BrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1},
'evaluation': [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
'bounding_box': False},
{'class': SmoothlyBrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1, 'delta': 1},
'evaluation': [(1 * u.m, 15.125 * u.kg), (1 * u.cm, 15.125 * u.kg)],
'bounding_box': False},
{'class': ExponentialCutoffPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'x_cutoff': 1 * u.m},
'evaluation': [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
'bounding_box': False},
{'class': LogParabola1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'beta': 2},
'evaluation': [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
'bounding_box': False}
]
POLY_MODELS = [
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.one, 'c1': 2 / u.m, 'c2': 3 / u.m**2},
'evaluation': [(3 * u.m, 36 * u.one)],
'bounding_box': False},
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg / u.m, 'c2': 3 * u.kg / u.m**2},
'evaluation': [(3 * u.m, 36 * u.kg)],
'bounding_box': False},
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg, 'c2': 3 * u.kg},
'evaluation': [(3 * u.one, 36 * u.kg)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.one, 'c1_0': 2 / u.m, 'c2_0': 3 / u.m**2,
'c0_1': 3 / u.s, 'c0_2': -2 / u.s**2, 'c1_1': 5 / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.one)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg / u.m, 'c2_0': 3 * u.kg / u.m**2,
'c0_1': 3 * u.kg / u.s, 'c0_2': -2 * u.kg / u.s**2, 'c1_1': 5 * u.kg / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.kg)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg, 'c2_0': 3 * u.kg,
'c0_1': 3 * u.kg, 'c0_2': -2 * u.kg, 'c1_1': 5 * u.kg},
'evaluation': [(3 * u.one, 2 * u.one, 64 * u.kg)],
'bounding_box': False},
]
MODELS = FUNC_MODELS_1D + FUNC_MODELS_2D + POWERLAW_MODELS
SCIPY_MODELS = {Sersic1D, Sersic2D, AiryDisk2D}
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = OrderedDict(zip(('x', 'y'), args))
else:
kwargs = OrderedDict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model['parameters'].items():
if value is None or key == 'degree':
params[key] = value
else:
params[key] = np.repeat(value, 2)
params['n_models'] = 2
m = model['class'](**params)
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize('model', MODELS)
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
# In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
if model['bounding_box'] is False:
# Check that NotImplementedError is raised, so that if bounding_box is
# implemented we remember to set bounding_box=True in the list of models
# above
with pytest.raises(NotImplementedError):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
        bbox = m.bounding_box
        for i in range(len(model['bounding_box'])):
            assert_quantity_allclose(bbox[i], model['bounding_box'][i])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('model', MODELS)
def test_models_fitting(model):
m = model['class'](**model['parameters'])
if len(model['evaluation'][0]) == 2:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.exp(-x.value ** 2) * model['evaluation'][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.linspace(1, 3, 100) * model['evaluation'][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model['evaluation'][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
fitter = LevMarLSQFitter()
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
            # If the parameter had no unit before the fit, allow the fitted
            # parameter either to still have no unit or to have picked up a
            # radian unit (as can happen for angle-like parameters)
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
|
7c56bb503f27d15487c751545e127f6a63648801159f3488c55a0403f39b5faf | """
Test the web profile using Python classes that have been adapted to act like a
web client. We can only put a single test here because only one hub can run
with the web profile active, and the user might want to run the tests in
parallel.
"""
import os
import threading
import tempfile
from urllib.request import Request, urlopen
from astropy.utils.data import get_readable_fileobj
from astropy.samp import SAMPIntegratedClient, SAMPHubServer
from .web_profile_test_helpers import (AlwaysApproveWebProfileDialog,
SAMPIntegratedWebClient)
from astropy.samp.web_profile import CROSS_DOMAIN, CLIENT_ACCESS_POLICY
from astropy.samp import conf
from .test_standard_profile import TestStandardProfile as BaseTestStandardProfile
def setup_module(module):
conf.use_internet = False
class TestWebProfile(BaseTestStandardProfile):
def setup_method(self, method):
self.dialog = AlwaysApproveWebProfileDialog()
t = threading.Thread(target=self.dialog.poll)
t.start()
self.tmpdir = tempfile.mkdtemp()
lockfile = os.path.join(self.tmpdir, '.samp')
self.hub = SAMPHubServer(web_profile_dialog=self.dialog,
lockfile=lockfile,
web_port=0, pool_size=1)
self.hub.start()
self.client1 = SAMPIntegratedClient()
self.client1.connect(hub=self.hub, pool_size=1)
self.client1_id = self.client1.get_public_id()
self.client1_key = self.client1.get_private_key()
self.client2 = SAMPIntegratedWebClient()
self.client2.connect(web_port=self.hub._web_port, pool_size=2)
self.client2_id = self.client2.get_public_id()
self.client2_key = self.client2.get_private_key()
def teardown_method(self, method):
if self.client1.is_connected:
self.client1.disconnect()
if self.client2.is_connected:
self.client2.disconnect()
self.hub.stop()
self.dialog.stop()
# The full communication tests are run since TestWebProfile inherits
# test_main from TestStandardProfile
def test_web_profile(self):
# Check some additional queries to the server
with get_readable_fileobj('http://localhost:{0}/crossdomain.xml'.format(self.hub._web_port)) as f:
assert f.read() == CROSS_DOMAIN
with get_readable_fileobj('http://localhost:{0}/clientaccesspolicy.xml'.format(self.hub._web_port)) as f:
assert f.read() == CLIENT_ACCESS_POLICY
# Check headers
req = Request('http://localhost:{0}/crossdomain.xml'.format(self.hub._web_port))
req.add_header('Origin', 'test_web_profile')
resp = urlopen(req)
assert resp.getheader('Access-Control-Allow-Origin') == 'test_web_profile'
assert resp.getheader('Access-Control-Allow-Headers') == 'Content-Type'
assert resp.getheader('Access-Control-Allow-Credentials') == 'true'
|
0fea967d94aeba28dc27f2e73b82d258d7c5236e4108dc7d3523fc0f25ad2ae5 | import os
import time
import pickle
import random
import string
from astropy.samp import SAMP_STATUS_OK
TEST_REPLY = {"samp.status": SAMP_STATUS_OK,
"samp.result": {"txt": "test"}}
def write_output(mtype, private_key, sender_id, params):
filename = params['verification_file']
    with open(filename, 'wb') as f:
        pickle.dump(mtype, f)
        pickle.dump(private_key, f)
        pickle.dump(sender_id, f)
        pickle.dump(params, f)
def assert_output(mtype, private_key, sender_id, params, timeout=None):
filename = params['verification_file']
start = time.time()
while True:
try:
with open(filename, 'rb') as f:
rec_mtype = pickle.load(f)
rec_private_key = pickle.load(f)
rec_sender_id = pickle.load(f)
rec_params = pickle.load(f)
break
except (OSError, EOFError):
if timeout is not None and time.time() - start > timeout:
raise Exception("Timeout while waiting for file: {0}".format(filename))
assert rec_mtype == mtype
assert rec_private_key == private_key
assert rec_sender_id == sender_id
assert rec_params == params
class Receiver:
def __init__(self, client):
self.client = client
def receive_notification(self, private_key, sender_id, mtype, params, extra):
write_output(mtype, private_key, sender_id, params)
def receive_call(self, private_key, sender_id, msg_id, mtype, params, extra):
# Here we need to make sure that we first reply, *then* write out the
# file, otherwise the tests see the file and move to the next call
# before waiting for the reply to be received.
self.client.reply(msg_id, TEST_REPLY)
self.receive_notification(private_key, sender_id, mtype, params, extra)
def receive_response(self, private_key, sender_id, msg_id, response):
pass
def random_id(length=16):
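    # random.sample draws without replacement from the 62-character alphabet
    # [A-Za-z0-9], so ``length`` must not exceed 62; the default of 16 is safe.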
return ''.join(random.sample(string.ascii_letters + string.digits, length))
def random_params(directory):
return {'verification_file': os.path.join(directory, random_id()),
'parameter1': 'abcde',
'parameter2': 1331}
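# Typical flow in the profile tests (a sketch; the clients, ids and keys come
# from the test fixtures, and 'samp.test' is a placeholder mtype):
#
#     params = random_params(tmpdir)
#     client1.notify(client2_id, {'samp.mtype': 'samp.test',
#                                 'samp.params': params})
#     assert_output('samp.test', client2_key, client1_id, params, timeout=60)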
|
03b052f4c0f682df1472d603b65c9ecc0d410f82f3bcb7e0399a0139234dc045 | import time
import threading
import xmlrpc.client as xmlrpc
from astropy.samp.hub import WebProfileDialog
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.client import SAMPClient
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.utils import ServerProxyPool
from astropy.samp.errors import SAMPClientError, SAMPHubError
class AlwaysApproveWebProfileDialog(WebProfileDialog):
def __init__(self):
self.polling = True
WebProfileDialog.__init__(self)
def show_dialog(self, *args):
self.consent()
def poll(self):
while self.polling:
self.handle_queue()
time.sleep(0.1)
def stop(self):
self.polling = False
class SAMPWebHubProxy(SAMPHubProxy):
"""
Proxy class to simplify the client interaction with a SAMP hub (via the web
profile).
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
"""
def connect(self, pool_size=20, web_port=21012):
"""
Connect to the current SAMP Hub on localhost:web_port
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
            Hub.
        web_port : int, optional
            The port on localhost on which the hub's web profile is
            listening.
        """
self._connected = False
try:
self.proxy = ServerProxyPool(pool_size, xmlrpc.ServerProxy,
'http://127.0.0.1:{0}'.format(web_port),
allow_none=1)
self.ping()
self._connected = True
except xmlrpc.ProtocolError as p:
raise SAMPHubError("Protocol Error {}: {}".format(p.errcode, p.errmsg))
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for both the standard and the web profile.
"""
return self.proxy.samp.webhub
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
raise NotImplementedError("set_xmlrpc_callback is not defined for the "
"web profile")
def register(self, identity_info):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(identity_info)
def allow_reverse_callbacks(self, private_key, allow):
"""
Proxy to ``allowReverseCallbacks`` SAMP Hub method.
"""
return self._samp_hub.allowReverseCallbacks(private_key, allow)
def pull_callbacks(self, private_key, timeout):
"""
Proxy to ``pullCallbacks`` SAMP Hub method.
"""
return self._samp_hub.pullCallbacks(private_key, timeout)
class SAMPWebClient(SAMPClient):
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable web client application.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
Parameters
----------
hub : :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy`
An instance of :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` to
be used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, hub, name=None, description=None, metadata=None,
callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}]}
self._response_bindings = {}
self.hub = hub
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
def _serve_forever(self):
while self.is_running:
# Watch for callbacks here
if self._is_registered:
results = self.hub.pull_callbacks(self.get_private_key(), 0)
for result in results:
if result['samp.methodName'] == 'receiveNotification':
self.receive_notification(self._private_key,
*result['samp.params'])
elif result['samp.methodName'] == 'receiveCall':
self.receive_call(self._private_key,
*result['samp.params'])
elif result['samp.methodName'] == 'receiveResponse':
self.receive_response(self._private_key,
*result['samp.params'])
self.hub.server_close()
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register("Astropy SAMP Web Client")
if result["samp.self-id"] == "":
raise SAMPClientError("Registation failed - samp.self-id "
"was not set by the hub.")
if result["samp.private-key"] == "":
raise SAMPClientError("Registation failed - samp.private-key "
"was not set by the hub.")
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._declare_subscriptions()
self.hub.allow_reverse_callbacks(self._private_key, True)
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
else:
raise SAMPClientError("Unable to register to the SAMP Hub. Hub "
"proxy not connected.")
class SAMPIntegratedWebClient(SAMPIntegratedClient):
"""
A Simple SAMP web client.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
This class is meant to simplify the client usage providing a proxy class
that merges the :class:`~astropy.samp.client.SAMPWebClient` and
:class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` functionalities in a
simplified API.
Parameters
----------
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, name=None, description=None, metadata=None,
callable=True):
self.hub = SAMPWebHubProxy()
self.client = SAMPWebClient(self.hub, name, description, metadata,
callable)
def connect(self, pool_size=20, web_port=21012):
"""
Connect with the current or specified SAMP Hub, start and register the
client.
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
            Hub.
        web_port : int, optional
            The port on localhost on which the hub's web profile is
            listening.
        """
self.hub.connect(pool_size, web_port=web_port)
self.client.start()
self.client.register()
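# A minimal connection sketch, mirroring how the tests drive this class
# (assumes a hub with the web profile enabled is listening on ``web_port``):
#
#     client = SAMPIntegratedWebClient()
#     client.connect(web_port=21012, pool_size=2)
#     ...
#     client.disconnect()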
|
3d3f8fd67043ee833b644105942533d7133bcba8dfedb545c14e88ec7f8deaf1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
from warnings import warn
from urllib.parse import urlparse
import numpy as np
from astropy import config as _config
from astropy import units as u
from astropy.table import Table, QTable
from astropy.utils.data import get_pkg_data_filename, clear_download_cache
from astropy import utils
from astropy.utils.exceptions import AstropyWarning
__all__ = ['Conf', 'conf',
'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto',
'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION',
'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE',
'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_URL_MIRROR', 'IERS_A_README',
'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README',
'IERSRangeError', 'IERSStaleWarning']
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = 'finals2000A.all'
IERS_A_URL = 'http://maia.usno.navy.mil/ser7/finals2000A.all'
IERS_A_URL_MIRROR = 'http://toshi.nofs.navy.mil/ser7/finals2000A.all'
IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A')
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now')
IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now'
IERS_B_README = get_pkg_data_filename('data/ReadMe.eopc04_IAU2000')
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
class IERSStaleWarning(AstropyWarning):
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
'Enable auto-downloading of the latest IERS data. If set to False '
'then the local IERS-B file will be used by default. Default is True.')
auto_max_age = _config.ConfigItem(
30.0,
'Maximum age (days) of predictive data before auto-downloading. Default is 30.')
iers_auto_url = _config.ConfigItem(
IERS_A_URL,
'URL for auto-downloading IERS file data.')
iers_auto_url_mirror = _config.ConfigItem(
IERS_A_URL_MIRROR,
'Mirror URL for auto-downloading IERS file data.')
remote_timeout = _config.ConfigItem(
10.0,
'Remote timeout downloading IERS file data (seconds).')
conf = Conf()
class IERSRangeError(IndexError):
"""
    Raised when dates are outside of the valid range covered by the IERS tables.
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or Time
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO+mjd) + jd2
return mjd, utc
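    # For example, mjd_utc(2450000.5, 0.25) returns (50000.0, 0.25):
    # JD 2450000.75 is MJD 50000 plus a fractional day of 0.25.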
def ut1_utc(self, jd1, jd2=0., return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['UT1_UTC'],
self.ut1_utc_source if return_status else None)
def dcip_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : Quantity with angle units
x component of CIP correction for the requested times
D_y : Quantity with angle units
y component of CIP correction for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['dX_2000A', 'dY_2000A'],
self.dcip_source if return_status else None)
def pm_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : Quantity with angle units
x component of polar motion for the requested times
PM_y : Quantity with angle units
y component of polar motion for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['PM_x', 'PM_y'],
self.pm_source if return_status else None)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
raise IERSRangeError('(some) times are outside of range covered '
'by IERS table.')
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, '__array__') or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
self._refresh_table_as_needed(mjd)
# For typical format, will always find a match (since MJD are integer)
# hence, important to define which side we will be; this ensures
# self['MJD'][i-1]<=mjd<self['MJD'][i]
i = np.searchsorted(self['MJD'].value, mjd, side='right')
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self['MJD'][i0].value, self['MJD'][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == 'UT1_UTC':
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# http://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
        time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
from astropy.time import Time
try:
return self._time_now
except Exception:
return Time.now()
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See http://maia.usno.navy.mil/
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL`` or ``iers.IERS_A_URL_MIRROR``. See ``iers.__doc__``
for instructions on use in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[~iers_a['UT1_UTC_A'].mask &
~iers_a['PolPMFlag_A'].mask]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Run np.where on the data from the table columns, since in numpy 1.9
# it otherwise returns an only partially initialized column.
table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask,
table['UT1_UTC_A'].data,
table['UT1_UTC_B'].data)
# Ensure the unit is correct, for later column conversion to Quantity.
table['UT1_UTC'].unit = table['UT1_UTC_A'].unit
table['UT1Flag'] = np.where(table['UT1_UTC_B'].mask,
table['UT1Flag_A'].data,
'B')
# Repeat for polar motions.
table['PM_x'] = np.where(table['PM_X_B'].mask,
table['PM_x_A'].data,
table['PM_X_B'].data)
table['PM_x'].unit = table['PM_x_A'].unit
table['PM_y'] = np.where(table['PM_Y_B'].mask,
table['PM_y_A'].data,
table['PM_Y_B'].data)
table['PM_y'].unit = table['PM_y_A'].unit
table['PolPMFlag'] = np.where(table['PM_X_B'].mask,
table['PolPMFlag_A'].data,
'B')
table['dX_2000A'] = np.where(table['dX_2000A_B'].mask,
table['dX_2000A_A'].data,
table['dX_2000A_B'].data)
table['dX_2000A'].unit = table['dX_2000A_A'].unit
table['dY_2000A'] = np.where(table['dY_2000A_B'].mask,
table['dY_2000A_A'].data,
table['dY_2000A_B'].data)
table['dY_2000A'].unit = table['dY_2000A_A'].unit
table['NutFlag'] = np.where(table['dX_2000A_B'].mask,
table['NutFlag_A'].data,
'B')
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
is_predictive = (table['UT1Flag_A'] == 'P') | (table['PolPMFlag_A'] == 'P')
table.meta['predictive_index'] = np.min(np.flatnonzero(is_predictive))
table.meta['predictive_mjd'] = table['MJD'][table.meta['predictive_index']]
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
# Read in as a regular Table, including possible masked columns.
# Columns will be filled and converted to Quantity in cls.__init__.
iers_a = Table.read(file, format='cds', readme=readme)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
# Fill any masked values, and convert to a QTable.
return cls(table.filled())
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
ut1flag = self['UT1Flag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == 'I'] = FROM_IERS_A
source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
nutflag = self['NutFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == 'I'] = FROM_IERS_A
source[nutflag == 'P'] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table"""
pmflag = self['PolPMFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == 'I'] = FROM_IERS_A
source[pmflag == 'P'] = FROM_IERS_A_PREDICTION
return source
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see http://www.iers.org/
Notes
-----
    If the package IERS B file (``iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=14):
"""Read IERS-B table from a eopc04_iau2000.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
starting row. Default is 14, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
# Read in as a regular Table, including possible masked columns.
# Columns will be filled and converted to Quantity in cls.__init__.
iers_b = Table.read(file, format='cds', readme=readme,
data_start=data_start)
return cls(iers_b.filled())
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
is older than ``astropy.utils.iers.conf.auto_max_age`` days old
(or non-existent) then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance with IERS (Earth rotation) data columns
"""
if not conf.auto_download:
cls.iers_table = IERS.open()
return cls.iers_table
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get('data_url') in all_urls:
return cls.iers_table
dl_success = False
err_list = []
for url in all_urls:
try:
filename = download_file(url, cache=True)
except Exception as err:
err_list.append(str(err))
else:
dl_success = True
break
if not dl_success:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning('failed to download {}, using local IERS-B: {}'
.format(' and '.join(all_urls),
';'.join(err_list)))) # noqa
cls.iers_table = IERS.open()
return cls.iers_table
cls.iers_table = cls.read(file=filename)
cls.iers_table.meta['data_url'] = str(url)
return cls.iers_table
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta['predictive_mjd']
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = (conf.auto_max_age if conf.auto_max_age is not None
else np.finfo(float).max)
if (max_input_mjd > predictive_mjd and
self.time_now.mjd - predictive_mjd > auto_max_age):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
        server if both of the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
        - The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta['predictive_index']
predictive_mjd = self.meta['predictive_mjd']
# Update table in place if necessary
auto_max_age = (conf.auto_max_age if conf.auto_max_age is not None
else np.finfo(float).max)
        # If auto_max_age is smaller than the IERS update interval then
        # repeated downloads may occur without getting updated values
        # (giving an IERSStaleWarning).
if auto_max_age < 10:
raise ValueError('IERS auto_max_age configuration value must be larger than 10 days')
if (max_input_mjd > predictive_mjd and
now_mjd - predictive_mjd > auto_max_age):
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
dl_success = False
err_list = []
# Get the latest version
for url in all_urls:
try:
clear_download_cache(url)
filename = download_file(url, cache=True)
except Exception as err:
err_list.append(str(err))
else:
dl_success = True
break
if not dl_success:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning('failed to download {}: {}.\nA coordinate or time-related '
'calculation might be compromised or fail because the dates are '
'not covered by the available IERS file. See the '
'"IERS data access" section of the astropy documentation '
'for additional information on working offline.'
.format(' and '.join(all_urls), ';'.join(err_list))))
return
new_table = self.__class__.read(file=filename)
new_table.meta['data_url'] = str(url)
# New table has new values?
if new_table['MJD'][-1] > self['MJD'][-1]:
                # Replace the current values from the first predictive index
                # through the end of the table. This in-place replacement is
                # much faster than deleting all rows and re-adding them one
                # by one with add_row.
new_fpi = np.searchsorted(new_table['MJD'].value, predictive_mjd, side='right')
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi:new_fpi + n_replace]
# Sanity check for continuity
if new_table['MJD'][new_fpi + n_replace] - self['MJD'][-1] != 1.0 * u.d:
raise ValueError('unexpected gap in MJD when refreshing IERS table')
# Now add new rows in place
for row in new_table[new_fpi + n_replace:]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(IERSStaleWarning(
'IERS_Auto predictive values are older than {} days but downloading '
'the latest table did not find newer values'.format(conf.auto_max_age)))
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table['MJD'][~table['UT1_UTC_B'].mask]
i0 = np.searchsorted(iers_b['MJD'].value, mjd_b[0], side='left')
i1 = np.searchsorted(iers_b['MJD'].value, mjd_b[-1], side='right')
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not np.allclose(table['MJD'][:n_iers_b], iers_b['MJD'].value):
raise ValueError('unexpected mismatch when copying '
'IERS-B values into IERS-A table.')
# Finally do the overwrite
table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC'].value
table['PM_X_B'][:n_iers_b] = iers_b['PM_x'].value
table['PM_Y_B'][:n_iers_b] = iers_b['PM_y'].value
return table
# by default for IERS class, read IERS-B table
IERS.read = IERS_B.read
|
298425e6c7ecb848548716cf97d2cebde19128f8a87f4a2ca5fbae62a2a2d049 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class that makes it simple to stream out well-formed and
nicely-indented XML.
"""
# STDLIB
import contextlib
import textwrap
try:
from . import _iterparser
except ImportError:
def xml_escape_cdata(s):
"""
Escapes &, < and > in an XML CDATA string.
"""
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
def xml_escape(s):
"""
Escapes &, ', ", < and > in an XML attribute value.
"""
s = s.replace("&", "&")
s = s.replace("'", "'")
s = s.replace("\"", """)
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
else:
xml_escape_cdata = _iterparser.escape_xml_cdata
xml_escape = _iterparser.escape_xml
class XMLWriter:
"""
A class to write well-formed and nicely indented XML.
Use like this::
w = XMLWriter(fh)
with w.tag('html'):
with w.tag('body'):
w.data('This is the content')
Which produces::
<html>
<body>
This is the content
</body>
</html>
"""
def __init__(self, file):
"""
Parameters
----------
file : writable file-like object.
"""
self.write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self._open = 0 # true if start tag is open
self._tags = []
self._data = []
self._indentation = " " * 64
self.xml_escape_cdata = xml_escape_cdata
self.xml_escape = xml_escape
def _flush(self, indent=True, wrap=False):
"""
Flush internal buffers.
"""
if self._open:
if indent:
self.write(">\n")
else:
self.write(">")
self._open = 0
if self._data:
data = ''.join(self._data)
if wrap:
indent = self.get_indentation_spaces(1)
data = textwrap.fill(
data,
initial_indent=indent,
subsequent_indent=indent)
self.write('\n')
self.write(self.xml_escape_cdata(data))
self.write('\n')
self.write(self.get_indentation_spaces())
else:
self.write(self.xml_escape_cdata(data))
self._data = []
def start(self, tag, attrib={}, **extra):
"""
Opens a new element. Attributes can be given as keyword
arguments, or as a string/string dictionary. The method
returns an opaque identifier that can be passed to the
:meth:`close` method, to close all open elements up to and
including this one.
Parameters
----------
tag : str
The element name
attrib : dict of str -> str
Attribute dictionary. Alternatively, attributes can
be given as keyword arguments.
Returns
-------
id : int
Returns an element identifier.
"""
self._flush()
# This is just busy work -- we know our tag names are clean
# tag = xml_escape_cdata(tag)
self._data = []
self._tags.append(tag)
self.write(self.get_indentation_spaces(-1))
self.write("<{}".format(tag))
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = list(attrib.items())
attrib.sort()
for k, v in attrib:
if v is not None:
# This is just busy work -- we know our keys are clean
# k = xml_escape_cdata(k)
v = self.xml_escape(v)
self.write(" {}=\"{}\"".format(k, v))
self._open = 1
return len(self._tags)
@contextlib.contextmanager
def xml_cleaning_method(self, method='escape_xml', **clean_kwargs):
"""Context manager to control how XML data tags are cleaned (escaped) to
remove potentially unsafe characters or constructs.
The default (``method='escape_xml'``) applies brute-force escaping of
        certain key XML characters like ``<``, ``>``, and ``&`` so that the
        data cannot be interpreted as XML markup.
In order to explicitly allow certain XML tags (e.g. link reference or
emphasis tags), use ``method='bleach_clean'``. This sanitizes the data
string using the ``clean`` function of the
        `bleach <http://bleach.readthedocs.io/en/latest/clean.html>`_ package.
Any additional keyword arguments will be passed directly to the
``clean`` function.
Finally, use ``method='none'`` to disable any sanitization. This should
be used sparingly.
Example::
w = writer.XMLWriter(ListWriter(lines))
with w.xml_cleaning_method('bleach_clean'):
w.start('td')
w.data('<a href="http://google.com">google.com</a>')
w.end()
Parameters
----------
method : str
Cleaning method. Allowed values are "escape_xml",
"bleach_clean", and "none".
**clean_kwargs : keyword args
Additional keyword args that are passed to the
bleach.clean() function.
"""
current_xml_escape_cdata = self.xml_escape_cdata
if method == 'bleach_clean':
# NOTE: bleach is imported locally to avoid importing it when
            # it is not necessary
try:
import bleach
except ImportError:
raise ValueError('bleach package is required when HTML escaping is disabled.\n'
'Use "pip install bleach".')
self.xml_escape_cdata = lambda x: bleach.clean(x, **clean_kwargs)
elif method == "none":
self.xml_escape_cdata = lambda x: x
elif method != 'escape_xml':
raise ValueError('allowed values of method are "escape_xml", "bleach_clean", and "none"')
        try:
            yield
        finally:
            self.xml_escape_cdata = current_xml_escape_cdata
@contextlib.contextmanager
def tag(self, tag, attrib={}, **extra):
"""
A convenience method for creating wrapper elements using the
``with`` statement.
Examples
--------
>>> with writer.tag('foo'): # doctest: +SKIP
... writer.element('bar')
... # </foo> is implicitly closed here
...
Parameters are the same as to `start`.
"""
self.start(tag, attrib, **extra)
yield
self.end(tag)
def comment(self, comment):
"""
Adds a comment to the output stream.
Parameters
----------
comment : str
Comment text, as a Unicode string.
"""
self._flush()
self.write(self.get_indentation_spaces())
self.write("<!-- {} -->\n".format(self.xml_escape_cdata(comment)))
def data(self, text):
"""
Adds character data to the output stream.
Parameters
----------
text : str
Character data, as a Unicode string.
"""
self._data.append(text)
def end(self, tag=None, indent=True, wrap=False):
"""
Closes the current element (opened by the most recent call to
`start`).
Parameters
----------
tag : str
Element name. If given, the tag must match the start tag.
If omitted, the current element is closed.
"""
if tag:
if not self._tags:
raise ValueError("unbalanced end({})".format(tag))
if tag != self._tags[-1]:
raise ValueError("expected end({}), got {}".format(
self._tags[-1], tag))
else:
if not self._tags:
raise ValueError("unbalanced end()")
tag = self._tags.pop()
if self._data:
self._flush(indent, wrap)
elif self._open:
self._open = 0
self.write("/>\n")
return
if indent:
self.write(self.get_indentation_spaces())
self.write("</{}>\n".format(tag))
def close(self, id):
"""
Closes open elements, up to (and including) the element identified
by the given identifier.
Parameters
----------
id : int
Element identifier, as returned by the `start` method.
"""
while len(self._tags) > id:
self.end()
def element(self, tag, text=None, wrap=False, attrib={}, **extra):
"""
Adds an entire element. This is the same as calling `start`,
`data`, and `end` in sequence. The ``text`` argument
can be omitted.
"""
self.start(tag, attrib, **extra)
if text:
self.data(text)
self.end(indent=False, wrap=wrap)
def flush(self):
pass # replaced by the constructor
def get_indentation(self):
"""
Returns the number of indentation levels the file is currently
in.
"""
return len(self._tags)
def get_indentation_spaces(self, offset=0):
"""
Returns a string of spaces that matches the current
indentation level.
"""
return self._indentation[:len(self._tags) + offset]
@staticmethod
def object_attrs(obj, attrs):
"""
        Converts an object with a bunch of attributes into a dictionary
        for use by the `XMLWriter`.
Parameters
----------
obj : object
Any Python object
attrs : sequence of str
Attribute names to pull from the object
Returns
-------
attrs : dict
Maps attribute names to the values retrieved from
``obj.attr``. If any of the attributes is `None`, it will
not appear in the output dictionary.
"""
d = {}
for attr in attrs:
if getattr(obj, attr) is not None:
d[attr.replace('_', '-')] = str(getattr(obj, attr))
return d
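# A self-contained version of the class docstring example; io.StringIO
# stands in for a real file handle:
#
#     import io
#     buf = io.StringIO()
#     w = XMLWriter(buf)
#     with w.tag('html'):
#         with w.tag('body'):
#             w.data('This is the content')
#     print(buf.getvalue())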
|
aec8c8ff472704fb8e279af444b4fe8ea81a50af593c83680d441437ff55c72a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from distutils.core import Extension
from os.path import join
import sys
from astropy_helpers import setup_helpers
def get_external_libraries():
return ['expat']
def get_extensions(build_type='release'):
XML_DIR = 'astropy/utils/xml/src'
cfg = setup_helpers.DistutilsExtensionArgs({
'sources': [join(XML_DIR, "iterparse.c")]
})
if setup_helpers.use_system_library('expat'):
cfg.update(setup_helpers.pkg_config(['expat'], ['expat']))
else:
EXPAT_DIR = 'cextern/expat/lib'
cfg['sources'].extend([
join(EXPAT_DIR, fn) for fn in
["xmlparse.c", "xmlrole.c", "xmltok.c", "xmltok_impl.c",
"loadlibrary.c"]])
cfg['include_dirs'].extend([XML_DIR, EXPAT_DIR])
if sys.platform.startswith('linux'):
# This is to ensure we only export the Python entry point
# symbols and the linker won't try to use the system expat in
# place of ours.
cfg['extra_link_args'].extend([
'-Wl,--version-script={0}'.format(
join(XML_DIR, 'iterparse.map'))
])
cfg['define_macros'].append(("HAVE_EXPAT_CONFIG_H", 1))
    # Tell expat about the byte order of the host machine.
    if sys.byteorder == 'big':
        cfg['define_macros'].append(('BYTEORDER', '4321'))
    else:
        cfg['define_macros'].append(('BYTEORDER', '1234'))
if sys.platform != 'win32':
cfg['define_macros'].append(('HAVE_UNISTD_H', None))
return [Extension("astropy.utils.xml._iterparser", **cfg)]
|
162c64d21ce98ca1607ce22f2427c240ee7d69f1c67c247ba93b89c8c0269969 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes a fast iterator-based XML parser.
"""
# STDLIB
import contextlib
import io
import sys
# ASTROPY
from astropy.utils import data
__all__ = ['get_xml_iterator', 'get_xml_encoding', 'xml_readlines']
@contextlib.contextmanager
def _convert_to_fd_or_read_function(fd):
"""
Returns a function suitable for streaming input, or a file object.
This function is only useful if passing off to C code where:
- If it's a real file object, we want to use it as a real
C file object to avoid the Python overhead.
- If it's not a real file object, it's much handier to just
have a Python function to call.
This is somewhat quirky behavior, of course, which is why it is
private. For a more useful version of similar behavior, see
`astropy.utils.misc.get_readable_fileobj`.
Parameters
----------
fd : object
May be:
- a file object. If the file is uncompressed, this raw
file object is returned verbatim. Otherwise, the read
method is returned.
- a function that reads from a stream, in which case it is
returned verbatim.
- a file path, in which case it is opened. Again, like a
file object, if it's uncompressed, a raw file object is
returned, otherwise its read method.
- an object with a :meth:`read` method, in which case that
method is returned.
Returns
-------
fd : context-dependent
See above.
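    Examples
    --------
    A hedged sketch of typical use; the file name is hypothetical::

        with _convert_to_fd_or_read_function('events.xml') as fd:
            if callable(fd):
                chunk = fd(1024)
            else:
                chunk = fd.read(1024)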
"""
if callable(fd):
yield fd
return
with data.get_readable_fileobj(fd, encoding='binary') as new_fd:
        if sys.platform.startswith('win'):
            # On Windows, real file objects are not handed through to
            # the C layer; always pass the read method instead.
            yield new_fd.read
else:
if isinstance(new_fd, io.FileIO):
yield new_fd
else:
yield new_fd.read
def _fast_iterparse(fd, buffersize=2 ** 10):
    """
    Pure-Python iterparser based on `xml.parsers.expat`, used as a
    fallback when the C extension is unavailable. Reads from *fd* in
    chunks of *buffersize* bytes and yields the same 4-tuples
    (*start*, *tag*, *data*, *pos*) as the C-based iterparser.
    """
    from xml.parsers import expat
    # Accept either a raw read function or a file-like object.
    if not callable(fd):
        read = fd.read
    else:
        read = fd
    queue = []
    text = []
    def start(name, attr):
        # Record a start-element event together with its source position
        # and reset the character-data accumulator.
        queue.append((True, name, attr,
                      (parser.CurrentLineNumber, parser.CurrentColumnNumber)))
        del text[:]
    def end(name):
        # Record an end-element event; the accumulated character data is
        # joined and stripped to form the element's text content.
        queue.append((False, name, ''.join(text).strip(),
                      (parser.CurrentLineNumber, parser.CurrentColumnNumber)))
    parser = expat.ParserCreate()
    parser.specified_attributes = True
    parser.StartElementHandler = start
    parser.EndElementHandler = end
    parser.CharacterDataHandler = text.append
    Parse = parser.Parse
    # Feed the parser chunk by chunk, draining the event queue after
    # each chunk so memory use stays bounded.
    data = read(buffersize)
    while data:
        Parse(data, False)
        for elem in queue:
            yield elem
        del queue[:]
        data = read(buffersize)
    # Signal end-of-input and flush any remaining events.
    Parse('', True)
    for elem in queue:
        yield elem
# Try to import the C version of the iterparser, otherwise fall back
# to the Python implementation above.
_slow_iterparse = _fast_iterparse
try:
from . import _iterparser
_fast_iterparse = _iterparser.IterParser
except ImportError:
pass
@contextlib.contextmanager
def get_xml_iterator(source, _debug_python_based_parser=False):
"""
Returns an iterator over the elements of an XML file.
    The iterator never builds a tree, so it is much more memory- and
    time-efficient than the alternative in ``cElementTree``.
    Parameters
    ----------
    source : readable file-like object or read function
Returns
-------
parts : iterator
The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*):
- *start*: when `True` is a start element event, otherwise
an end element event.
- *tag*: The name of the element
        - *data*: Depends on the value of *start*:
- if *start* == `True`, data is a dictionary of
attributes
- if *start* == `False`, data is a string containing
the text content of the element
- *pos*: Tuple (*line*, *col*) indicating the source of the
event.
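    Examples
    --------
    A minimal sketch of walking a document; the file name and tag are
    illustrative::

        with get_xml_iterator('votable.xml') as parts:
            for start, tag, data, pos in parts:
                if start and tag == 'TABLE':
                    print('TABLE starts at line', pos[0])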
"""
with _convert_to_fd_or_read_function(source) as fd:
if _debug_python_based_parser:
context = _slow_iterparse(fd)
else:
context = _fast_iterparse(fd)
yield iter(context)
def get_xml_encoding(source):
"""
Determine the encoding of an XML file by reading its header.
Parameters
----------
source : readable file-like object, read function or str path
Returns
-------
encoding : str
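    Examples
    --------
    A short sketch; the file name is hypothetical::

        get_xml_encoding('votable.xml')  # e.g. 'utf-8'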
"""
with get_xml_iterator(source) as iterator:
start, tag, data, pos = next(iterator)
if not start or tag != 'xml':
raise OSError('Invalid XML file')
        # Per the XML spec, a document with no encoding declaration
        # defaults to UTF-8.
return data.get('encoding') or 'utf-8'
def xml_readlines(source):
"""
Get the lines from a given XML file. Correctly determines the
encoding and always returns unicode.
Parameters
----------
source : readable file-like object, read function or str path
Returns
-------
lines : list of unicode
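    Examples
    --------
    A short sketch; the file name is hypothetical::

        lines = xml_readlines('votable.xml')
        # lines[0] is typically the XML declaration.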
"""
encoding = get_xml_encoding(source)
with data.get_readable_fileobj(source, encoding=encoding) as input:
input.seek(0)
xml_lines = input.readlines()
return xml_lines
|
05cbf68a06ac5f7c91bf1d0691aeb005ae2c7faaa944c27d42a1f6252895941e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to do XML schema and DTD validation. At the moment, this
makes a subprocess call to xmllint. This could use a Python-based
library at some point in the future, if something appropriate could be
found.
"""
import os
import subprocess
def validate_schema(filename, schema_file):
"""
Validates an XML file against a schema or DTD.
Parameters
----------
filename : str
The path to the XML file to validate
schema_file : str
The path to the XML schema or DTD
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings
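    Examples
    --------
    A hedged sketch; the paths are hypothetical and ``xmllint`` must be
    on the system path::

        returncode, stdout, stderr = validate_schema(
            'votable.xml', 'VOTable-1.3.xsd')
        if returncode != 0:
            print(stderr.decode('utf-8'))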
"""
base, ext = os.path.splitext(schema_file)
if ext == '.xsd':
schema_part = '--schema ' + schema_file
elif ext == '.dtd':
schema_part = '--dtdvalid ' + schema_file
else:
raise TypeError("schema_file must be a path to an XML Schema or DTD")
p = subprocess.Popen(
"xmllint --noout --nonet {} {}".format(schema_part, filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 127:
raise OSError(
"xmllint not found, so can not validate schema")
elif p.returncode < 0:
from astropy.utils.misc import signal_number_to_name
raise OSError(
"xmllint was terminated by signal '{0}'".format(
signal_number_to_name(-p.returncode)))
return p.returncode, stdout, stderr
|
98bedc1f2dfc90b6609723f665313136da223d39d0f5229a5f9c50f714479fed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import os
from datetime import datetime
import locale
import pytest
import numpy as np
from astropy.utils import data, misc
def test_isiterable():
assert misc.isiterable(2) is False
assert misc.isiterable([2]) is True
assert misc.isiterable([1, 2, 3]) is True
assert misc.isiterable(np.array(2)) is False
assert misc.isiterable(np.array([1, 2, 3])) is True
def test_signal_number_to_name_no_failure():
# Regression test for #5340: ensure signal_number_to_name throws no
# AttributeError (it used ".iteritems()" which was removed in Python3).
misc.signal_number_to_name(0)
@pytest.mark.remote_data
def test_api_lookup():
strurl = misc.find_api_page('astropy.utils.misc', 'dev', False, timeout=3)
objurl = misc.find_api_page(misc, 'dev', False, timeout=3)
assert strurl == objurl
assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc'
def test_skip_hidden():
path = data._find_pkg_data_path('data')
for root, dirs, files in os.walk(path):
assert '.hidden_file.txt' in files
assert 'local.dat' in files
# break after the first level since the data dir contains some other
# subdirectories that don't have these files
break
for root, dirs, files in misc.walk_skip_hidden(path):
assert '.hidden_file.txt' not in files
assert 'local.dat' in files
break
def test_JsonCustomEncoder():
from astropy import units as u
assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]'
assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]'
assert json.dumps(set([1, 2, 1]), cls=misc.JsonCustomEncoder) == '[1, 2]'
assert json.dumps(b'hello world \xc3\x85',
cls=misc.JsonCustomEncoder) == '"hello world \\u00c5"'
assert json.dumps({1: 2},
cls=misc.JsonCustomEncoder) == '{"1": 2}' # default
assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}'
# Quantities
tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp)
tmpd = {"a": {"unit": "cm", "value": 5.0}}
assert newd == tmpd
tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp2)
tmpd = {"a": {"unit": "cm", "value": [0., 1.]}}
assert newd == tmpd
tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp3)
tmpd = {"a": {"unit": "erg / s", "value": [0., 1.]}}
assert newd == tmpd
def test_inherit_docstrings():
class Base(metaclass=misc.InheritDocstrings):
def __call__(self, *args):
"FOO"
pass
@property
def bar(self):
"BAR"
pass
class Subclass(Base):
def __call__(self, *args):
pass
@property
def bar(self):
return 42
if Base.__call__.__doc__ is not None:
# TODO: Maybe if __doc__ is None this test should be skipped instead?
assert Subclass.__call__.__doc__ == "FOO"
if Base.bar.__doc__ is not None:
assert Subclass.bar.__doc__ == "BAR"
def test_set_locale():
# First, test if the required locales are available
current = locale.setlocale(locale.LC_ALL)
try:
        locale.setlocale(locale.LC_ALL, 'en_US')
        locale.setlocale(locale.LC_ALL, 'de_DE')
except locale.Error as e:
pytest.skip('Locale error: {}'.format(e))
finally:
locale.setlocale(locale.LC_ALL, current)
date = datetime(2000, 10, 1, 0, 0, 0)
day_mon = date.strftime('%a, %b')
with misc.set_locale('en_US'):
assert date.strftime('%a, %b') == 'Sun, Oct'
with misc.set_locale('de_DE'):
assert date.strftime('%a, %b') == 'So, Okt'
# Back to original
assert date.strftime('%a, %b') == day_mon
with misc.set_locale(current):
assert date.strftime('%a, %b') == day_mon
def test_check_broadcast():
assert misc.check_broadcast((10, 1), (3,)) == (10, 3)
assert misc.check_broadcast((10, 1), (3,), (4, 1, 1, 3)) == (4, 1, 10, 3)
with pytest.raises(ValueError):
misc.check_broadcast((10, 2), (3,))
with pytest.raises(ValueError):
misc.check_broadcast((10, 1), (3,), (4, 1, 2, 3))
def test_dtype_bytes_or_chars():
assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8
assert misc.dtype_bytes_or_chars(np.dtype(object)) is None
assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4
assert misc.dtype_bytes_or_chars(np.array(b'12345').dtype) == 5
assert misc.dtype_bytes_or_chars(np.array(u'12345').dtype) == 5
|
c0d105ce4debff7f04e4cc207235785db9eabd191d3d982971de5ac09494ae98 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test `astropy.utils.timer`.
.. note::
The tests only compare rough estimates as
performance is machine-dependent.
"""
# STDLIB
import time
# THIRD-PARTY
import pytest
import numpy as np
# LOCAL
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.timer import RunTimePredictor
from astropy.modeling.fitting import ModelsError
def func_to_time(x):
"""This sleeps for y seconds for use with timing tests.
.. math::
y = 5 * x - 10
"""
y = 5.0 * np.asarray(x) - 10
time.sleep(y)
return y
def test_timer():
"""Test function timer."""
p = RunTimePredictor(func_to_time)
# --- These must run before data points are introduced. ---
with pytest.raises(ValueError):
p.do_fit()
with pytest.raises(RuntimeError):
p.predict_time(100)
# --- These must run next to set up data points. ---
with pytest.warns(AstropyUserWarning, match="ufunc 'multiply' did not "
"contain a loop with signature matching types"):
p.time_func([2.02, 2.04, 2.1, 'a', 2.3])
p.time_func(2.2) # Test OrderedDict
assert p._funcname == 'func_to_time'
assert p._cache_bad == ['a']
k = list(p.results.keys())
v = list(p.results.values())
np.testing.assert_array_equal(k, [2.02, 2.04, 2.1, 2.3, 2.2])
np.testing.assert_allclose(v, [0.1, 0.2, 0.5, 1.5, 1.0])
# --- These should only run once baseline is established. ---
with pytest.raises(ModelsError):
a = p.do_fit(model='foo')
with pytest.raises(ModelsError):
a = p.do_fit(fitter='foo')
a = p.do_fit()
assert p._power == 1
# Perfect slope is 5, with 10% uncertainty
assert 4.5 <= a[1] <= 5.5
# Perfect intercept is -10, with 1-sec uncertainty
assert -11 <= a[0] <= -9
# --- These should only run once fitting is completed. ---
# Perfect answer is 490, with 10% uncertainty
t = p.predict_time(100)
assert 441 <= t <= 539
# Repeated call to access cached run time
t2 = p.predict_time(100)
assert t == t2
|
45b660fd93764989ee0d5896d5a458ddbc1fadcce07a9f9ed1214ca9cce97b8d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.utils.xml import check, unescaper, writer
try:
import bleach # noqa
HAS_BLEACH = True
except ImportError:
HAS_BLEACH = False
def test_writer():
fh = io.StringIO()
w = writer.XMLWriter(fh)
with w.tag("html"):
with w.tag("body"):
w.data("This is the content")
w.comment("comment")
value = ''.join(fh.getvalue().split())
assert value == '<html><body>Thisisthecontent<!--comment--></body></html>'
def test_check_id():
assert check.check_id("Fof32")
assert check.check_id("_Fof32")
assert not check.check_id("32Fof")
def test_fix_id():
assert check.fix_id("Fof32") == "Fof32"
assert check.fix_id("@#f") == "___f"
def test_check_token():
assert check.check_token("token")
assert not check.check_token("token\rtoken")
def test_check_mime_content_type():
assert check.check_mime_content_type("image/jpeg")
assert not check.check_mime_content_type("image")
def test_check_anyuri():
assert check.check_anyuri("https://github.com/astropy/astropy")
def test_unescape_all():
# str
url_in = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?' \
'DSACAT=IDR&amp;DSATAB=Emitters&amp;'
url_out = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
'DSACAT=IDR&DSATAB=Emitters&'
assert unescaper.unescape_all(url_in) == url_out
# bytes
url_in = b'http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?' \
b'DSACAT=IDR&amp;DSATAB=Emitters&amp;'
url_out = b'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
b'DSACAT=IDR&DSATAB=Emitters&'
assert unescaper.unescape_all(url_in) == url_out
def test_escape_xml():
s = writer.xml_escape('This & That')
assert type(s) == str
    assert s == 'This &amp; That'
s = writer.xml_escape(1)
assert type(s) == str
assert s == '1'
s = writer.xml_escape(b'This & That')
assert type(s) == bytes
    assert s == b'This &amp; That'
@pytest.mark.skipif('HAS_BLEACH')
def test_escape_xml_without_bleach():
fh = io.StringIO()
w = writer.XMLWriter(fh)
with pytest.raises(ValueError) as err:
with w.xml_cleaning_method('bleach_clean'):
pass
assert 'bleach package is required when HTML escaping is disabled' in str(err)
@pytest.mark.skipif('not HAS_BLEACH')
def test_escape_xml_with_bleach():
fh = io.StringIO()
w = writer.XMLWriter(fh)
# Turn off XML escaping, but still sanitize unsafe tags like <script>
with w.xml_cleaning_method('bleach_clean'):
w.start('td')
w.data('<script>x</script> <em>OK</em>')
w.end(indent=False)
    assert fh.getvalue() == '<td>&lt;script&gt;x&lt;/script&gt; <em>OK</em></td>\n'
fh = io.StringIO()
w = writer.XMLWriter(fh)
# Default is True (all XML tags escaped)
with w.xml_cleaning_method():
w.start('td')
w.data('<script>x</script> <em>OK</em>')
w.end(indent=False)
    assert fh.getvalue() == '<td>&lt;script&gt;x&lt;/script&gt; &lt;em&gt;OK&lt;/em&gt;</td>\n'
|
d953ebfd939a56257c66c7c339865ad54de9061d4cc96184f36e4652d0d7222e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Some might be indirectly tested already in ``astropy.io.fits.tests``.
"""
import io
import numpy as np
import pytest
from astropy.utils.diff import diff_values, report_diff_values, where_not_allclose
from astropy.table import Table
@pytest.mark.parametrize('a', [np.nan, np.inf, 1.11, 1, 'a'])
def test_diff_values_false(a):
assert not diff_values(a, a)
@pytest.mark.parametrize(
('a', 'b'),
[(np.inf, np.nan), (1.11, 1.1), (1, 2), (1, 'a'), ('a', 'b')])
def test_diff_values_true(a, b):
assert diff_values(a, b)
def test_float_comparison():
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/21
"""
f = io.StringIO()
a = np.float32(0.029751372)
b = np.float32(0.029751368)
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
# This test doesn't care about what the exact output is, just that it
# did show a difference in their text representations
assert 'a>' in out
assert 'b>' in out
def test_diff_types():
"""
Regression test for https://github.com/astropy/astropy/issues/4122
"""
f = io.StringIO()
a = 1.0
b = '1.0'
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (" (float) a> 1.0\n"
" (str) b> '1.0'\n"
" ? + +\n")
def test_diff_numeric_scalar_types():
""" Test comparison of different numeric scalar types. """
f = io.StringIO()
assert not report_diff_values(1.0, 1, fileobj=f)
out = f.getvalue()
assert out == ' (float) a> 1.0\n (int) b> 1\n'
def test_array_comparison():
"""
Test diff-ing two arrays.
"""
f = io.StringIO()
a = np.arange(9).reshape(3, 3)
b = a + 1
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' at [0, 0]:\n'
' a> 0\n'
' b> 1\n'
' at [0, 1]:\n'
' a> 1\n'
' b> 2\n'
' at [0, 2]:\n'
' a> 2\n'
' b> 3\n'
' ...and at 6 more indices.\n')
def test_diff_shaped_array_comparison():
"""
Test diff-ing two differently shaped arrays.
"""
f = io.StringIO()
a = np.empty((1, 2, 3))
identical = report_diff_values(a, a[0], fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' Different array shapes:\n'
' a> (1, 2, 3)\n'
' ? ---\n'
' b> (2, 3)\n')
def test_tablediff():
"""
Test diff-ing two simple Table objects.
"""
a = Table.read("""name obs_date mag_b mag_v
M31 2012-01-02 17.0 16.0
M82 2012-10-29 16.2 15.2
M101 2012-10-31 15.1 15.5""", format='ascii')
b = Table.read("""name obs_date mag_b mag_v
M31 2012-01-02 17.0 16.5
M82 2012-10-29 16.2 15.2
M101 2012-10-30 15.1 15.5
NEW 2018-05-08 nan 9.0""", format='ascii')
f = io.StringIO()
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' name obs_date mag_b mag_v\n'
' ---- ---------- ----- -----\n'
' a> M31 2012-01-02 17.0 16.0\n'
' ? ^\n'
' b> M31 2012-01-02 17.0 16.5\n'
' ? ^\n'
' M82 2012-10-29 16.2 15.2\n'
' a> M101 2012-10-31 15.1 15.5\n'
' ? ^\n'
' b> M101 2012-10-30 15.1 15.5\n'
' ? ^\n'
' b> NEW 2018-05-08 nan 9.0\n')
# Identical
assert report_diff_values(a, a, fileobj=f)
@pytest.mark.parametrize('kwargs', [{}, {'atol': 0, 'rtol': 0}])
def test_where_not_allclose(kwargs):
a = np.array([1, np.nan, np.inf, 4.5])
b = np.array([1, np.inf, np.nan, 4.6])
assert where_not_allclose(a, b, **kwargs) == ([3], )
assert len(where_not_allclose(a, a, **kwargs)[0]) == 0
|