# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides functions to help with testing against iraf tasks
"""
import numpy as np
from astropy.logger import log
iraf_models_map = {1.: 'Chebyshev',
2.: 'Legendre',
3.: 'Spline3',
4.: 'Spline1'}
def get_records(fname):
"""
Read the records of an IRAF database file into a python list
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
A list of records
"""
    with open(fname) as f:
        dtb = f.read()
recs = dtb.split('begin')[1:]
records = [Record(r) for r in recs]
return records
def get_database_string(fname):
"""
Read an IRAF database file
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
the database file as a string
"""
    with open(fname) as f:
        dtb = f.read()
return dtb
class Record:
"""
A base class for all records - represents an IRAF database record
Attributes
----------
recstr: string
the record as a string
fields: dict
the fields in the record
taskname: string
the name of the task which created the database file
"""
def __init__(self, recstr):
self.recstr = recstr
self.fields = self.get_fields()
self.taskname = self.get_task_name()
def aslist(self):
reclist = self.recstr.split('\n')
reclist = [entry.strip() for entry in reclist]
        reclist = [entry for entry in reclist if len(entry) > 0]
return reclist
def get_fields(self):
        # Read the record fields into a dictionary; a field followed by
        # non-alphabetic (numeric) lines is parsed as an array field.
fields = {}
flist = self.aslist()
numfields = len(flist)
for i in range(numfields):
line = flist[i]
if line and line[0].isalpha():
field = line.split()
if i + 1 < numfields:
if not flist[i + 1][0].isalpha():
fields[field[0]] = self.read_array_field(
flist[i:i + int(field[1]) + 1])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
continue
return fields
def get_task_name(self):
try:
return self.fields['task']
except KeyError:
return None
def read_array_field(self, fieldlist):
# Turn an iraf record array field into a numpy array
fieldline = [entry.split() for entry in fieldlist[1:]]
        # take only the first 3 columns;
        # the identify task also writes strings at the end of some field lines
xyz = [entry[:3] for entry in fieldline]
        try:
            farr = np.array(xyz)
        except Exception:
            log.debug(f"Could not read array field {fieldlist[0].split()[0]}")
            return None
        return farr.astype(np.float64)
class IdentifyRecord(Record):
"""
Represents a database record for the onedspec.identify task
Attributes
----------
    x: array
        the X values of the identified features;
        these are values along axis 1 (image rows)
    y: int
        the Y (column) value at which the features were identified
z: array
the values which X maps into
modelname: string
the function used to fit the data
nterms: int
degree of the polynomial which was fit to the data
in IRAF this is the number of coefficients, not the order
mrange: list
the range of the data
coeff: array
function (modelname) coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._flatcoeff = self.fields['coefficients'].flatten()
self.x = self.fields['features'][:, 0]
self.y = self.get_ydata()
self.z = self.fields['features'][:, 1]
self.modelname = self.get_model_name()
self.nterms = self.get_nterms()
self.mrange = self.get_range()
self.coeff = self.get_coeff()
def get_model_name(self):
return iraf_models_map[self._flatcoeff[0]]
def get_nterms(self):
return self._flatcoeff[1]
def get_range(self):
low = self._flatcoeff[2]
high = self._flatcoeff[3]
return [low, high]
def get_coeff(self):
return self._flatcoeff[4:]
def get_ydata(self):
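        # The 'image' field holds a section string like 'imname[1:512,455]';
        # return the second (column) index, or the single index for a 1D
        # section like 'imname[455]'.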
image = self.fields['image']
left = image.find('[') + 1
right = image.find(']')
section = image[left:right]
if ',' in section:
yind = image.find(',') + 1
return int(image[yind:-1])
else:
return int(section)
class FitcoordsRecord(Record):
"""
    Represents a database record for the longslit.fitcoords task
Attributes
----------
modelname: string
the function used to fit the data
xorder: int
number of terms in x
yorder: int
number of terms in y
xbounds: list
data range in x
ybounds: list
data range in y
coeff: array
function coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._surface = self.fields['surface'].flatten()
self.modelname = iraf_models_map[self._surface[0]]
self.xorder = self._surface[1]
self.yorder = self._surface[2]
self.xbounds = [self._surface[4], self._surface[5]]
self.ybounds = [self._surface[6], self._surface[7]]
self.coeff = self.get_coeff()
def get_coeff(self):
return self._surface[8:]
class IDB:
"""
Base class for an IRAF identify database
Attributes
----------
records: list
a list of all `IdentifyRecord` in the database
numrecords: int
number of records
"""
def __init__(self, dtbstr):
self.records = [IdentifyRecord(rstr) for rstr in self.aslist(dtbstr)]
self.numrecords = len(self.records)
def aslist(self, dtb):
# return a list of records
# if the first one is a comment remove it from the list
rl = dtb.split('begin')
try:
rl0 = rl[0].split('\n')
except Exception:
return rl
if len(rl0) == 2 and rl0[0].startswith('#') and not rl0[1].strip():
return rl[1:]
else:
return rl
class ReidentifyRecord(IDB):
"""
Represents a database record for the onedspec.reidentify task
"""
def __init__(self, databasestr):
super().__init__(databasestr)
self.x = np.array([r.x for r in self.records])
self.y = self.get_ydata()
self.z = np.array([r.z for r in self.records])
def get_ydata(self):
y = np.ones(self.x.shape)
y = y * np.array([r.y for r in self.records])[:, np.newaxis]
return y
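# Illustrative usage sketch, not part of the original module: parse an IRAF
# identify database file and reparse each record as an ``IdentifyRecord``.
# The helper name and the example file name are hypothetical.
def _example_read_identify(fname):
    # e.g. fname = 'database/idimage'
    recs = get_records(fname)
    return [IdentifyRecord(r.recstr) for r in recs]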
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Here are all the test parameters and values for each
`~astropy.modeling.FittableModel` defined. There is a dictionary for 1D and a
dictionary for 2D models.
Explanation of keywords of the dictionaries:
"parameters" : list or dict
Model parameters, the model is tested with. Make sure you keep the right
order. For polynomials you can also use a dict to specify the
coefficients. See examples below.
"x_values" : list
x values where the model is evaluated.
"y_values" : list
Reference y values for the in x_values given positions.
"z_values" : list
Reference z values for the in x_values and y_values given positions.
(2D model option)
"x_lim" : list
x test range for the model fitter. Depending on the model this can differ
e.g. the PowerLaw model should be tested over a few magnitudes.
"y_lim" : list
y test range for the model fitter. Depending on the model this can differ
e.g. the PowerLaw model should be tested over a few magnitudes. (2D model
option)
"log_fit" : bool
PowerLaw models should be tested over a few magnitudes. So log_fit should
be true.
"requires_scipy" : bool
If a model requires scipy (Bessel functions etc.) set this flag.
"integral" : float
Approximate value of the integral in the range x_lim (and y_lim).
"deriv_parameters" : list
If given the test of the derivative will use these parameters to create a
model (optional)
"deriv_initial" : list
If given the test of the derivative will use these parameters as initial
values for the fit (optional)
"""
import numpy as np
from astropy.modeling.functional_models import (
AiryDisk2D, ArcCosine1D, ArcSine1D, ArcTangent1D, Box1D, Box2D, Const1D, Const2D, Cosine1D,
Disk2D, Exponential1D, Gaussian1D, Gaussian2D, KingProjectedAnalytic1D, Linear1D, Logarithmic1D,
Lorentz1D, Moffat1D, Moffat2D, Planar2D, RickerWavelet1D, RickerWavelet2D, Ring2D, Sersic1D,
Sersic2D, Sine1D, Tangent1D, Trapezoid1D, TrapezoidDisk2D, Voigt1D)
from astropy.modeling.physical_models import Drude1D, Plummer1D
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D, PowerLaw1D, Schechter1D,
SmoothlyBrokenPowerLaw1D)
# 1D Models
models_1D = {
Gaussian1D: {
'parameters': [1, 0, 1],
'x_values': [0, np.sqrt(2), -np.sqrt(2)],
'y_values': [1.0, 0.367879, 0.367879],
'x_lim': [-10, 10],
'integral': np.sqrt(2 * np.pi),
'bbox_peak': True
},
Sine1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 2.5],
'y_values': [0, 1],
'x_lim': [-10, 10],
'integral': 0
},
Cosine1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 2.5],
'y_values': [1, 0],
'x_lim': [-10, 10],
'integral': 0
},
Tangent1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 1.25],
'y_values': [0, 1],
'x_lim': [-10, 10],
'integral': 0
},
ArcSine1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 1],
'y_values': [0, 2.5],
'x_lim': [-0.5, 0.5],
'integral': 0
},
ArcCosine1D: {
'parameters': [1, 0.1, 0],
'x_values': [1, 0],
'y_values': [0, 2.5],
'x_lim': [-0.5, 0.5],
'integral': 0
},
ArcTangent1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 1],
'y_values': [0, 1.25],
'x_lim': [-10, 10],
'integral': 0
},
Box1D: {
'parameters': [1, 0, 10],
'x_values': [-5, 5, 0, -10, 10],
'y_values': [1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'integral': 10,
'bbox_peak': True
},
Linear1D: {
'parameters': [1, 0],
'x_values': [0, np.pi, 42, -1],
'y_values': [0, np.pi, 42, -1],
'x_lim': [-10, 10],
'integral': 0
},
Lorentz1D: {
'parameters': [1, 0, 1],
'x_values': [0, -1, 1, 0.5, -0.5],
'y_values': [1., 0.2, 0.2, 0.5, 0.5],
'x_lim': [-10, 10],
'integral': 1,
'bbox_peak': True
},
RickerWavelet1D: {
'parameters': [1, 0, 1],
'x_values': [0, 1, -1, 3, -3],
'y_values': [1.0, 0.0, 0.0, -0.088872, -0.088872],
'x_lim': [-20, 20],
'integral': 0,
'bbox_peak': True
},
Trapezoid1D: {
'parameters': [1, 0, 2, 1],
'x_values': [0, 1, -1, 1.5, -1.5, 2, 2],
'y_values': [1, 1, 1, 0.5, 0.5, 0, 0],
'x_lim': [-10, 10],
'integral': 3,
'bbox_peak': True
},
Const1D: {
'parameters': [1],
'x_values': [-1, 1, np.pi, -42., 0],
'y_values': [1, 1, 1, 1, 1],
'x_lim': [-10, 10],
'integral': 20
},
Moffat1D: {
'parameters': [1, 0, 1, 2],
'x_values': [0, 1, -1, 3, -3],
'y_values': [1.0, 0.25, 0.25, 0.01, 0.01],
'x_lim': [-10, 10],
'integral': 1,
'deriv_parameters': [23.4, 1.2, 2.1, 2.3],
'deriv_initial': [10, 1, 1, 1]
},
PowerLaw1D: {
'parameters': [1, 1, 2],
'constraints': {'fixed': {'x_0': True}},
'x_values': [1, 10, 100],
'y_values': [1.0, 0.01, 0.0001],
'x_lim': [1, 10],
'log_fit': True,
'integral': 0.99
},
BrokenPowerLaw1D: {
'parameters': [1, 1, 2, 3],
'constraints': {'fixed': {'x_break': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [1e2, 1.0, 1e-3, 1e-6],
'x_lim': [0.1, 100],
'log_fit': True
},
SmoothlyBrokenPowerLaw1D: {
'parameters': [1, 1, -2, 2, 0.5],
'constraints': {'fixed': {'x_break': True, 'delta': True}},
'x_values': [0.01, 1, 100],
'y_values': [3.99920012e-04, 1.0, 3.99920012e-04],
'x_lim': [0.01, 100],
'log_fit': True
},
ExponentialCutoffPowerLaw1D: {
'parameters': [1, 1, 2, 3],
'constraints': {'fixed': {'x_0': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [9.67216100e+01, 7.16531311e-01, 3.56739933e-04,
3.33823780e-19],
'x_lim': [0.01, 100],
'log_fit': True
},
Schechter1D: {
'parameters': [1., -20., -1.],
'x_values': [-25., -23., -21., -19., -17.],
'y_values': [3.42631659e-44, 1.20551329e-07, 7.47097466e-02,
6.18557294e-01, 8.64716111e-01],
'x_lim': [-25, -17.],
},
LogParabola1D: {
'parameters': [1, 2, 3, 0.1],
'constraints': {'fixed': {'x_0': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [3.26089063e+03, 7.62472488e+00, 6.17440488e-03,
1.73160572e-06],
'x_lim': [0.1, 100],
'log_fit': True
},
Polynomial1D: {
'parameters': {'degree': 2, 'c0': 1., 'c1': 1., 'c2': 1.},
'x_values': [1, 10, 100],
'y_values': [3, 111, 10101],
'x_lim': [-3, 3]
},
Sersic1D: {
'parameters': [1, 20, 4],
'x_values': [0.1, 1, 10, 100],
'y_values': [2.78629391e+02, 5.69791430e+01, 3.38788244e+00,
2.23941982e-02],
'requires_scipy': True,
'x_lim': [0, 10],
'log_fit': True
},
Voigt1D: {
'parameters': [0, 1, 0.5, 0.9],
'x_values': [0, 0.2, 0.5, 1, 2, 4, 8, 20],
'y_values': [0.52092360, 0.479697445, 0.317550374, 0.0988079347,
1.73876624e-2, 4.00173216e-3, 9.82351731e-4, 1.56396993e-4],
'x_lim': [-3, 3]
},
KingProjectedAnalytic1D: {
'parameters': [1, 1, 2],
'x_values': [0, 0.1, 0.5, 0.8],
'y_values': [0.30557281, 0.30011069, 0.2, 0.1113258],
'x_lim': [0, 10],
'y_lim': [0, 10],
'bbox_peak': True
},
Drude1D: {
'parameters': [1.0, 8.0, 1.0],
'x_values': [7.0, 8.0, 9.0, 10.0],
'y_values': [0.17883212, 1.0, 0.21891892, 0.07163324],
'x_lim': [1.0, 20.0],
'y_lim': [0.0, 10.0],
'bbox_peak': True
},
Plummer1D: {
'parameters': [10., 0.5],
'x_values': [1.0000e-03, 2.5005e+00, 5.0000e+00],
'y_values': [1.90984022e+01, 5.53541843e-03, 1.86293603e-04],
'x_lim': [0.001, 100]
},
Exponential1D: {
'parameters': [1, 1],
'x_values': [0, 0.5, 1],
'y_values': [1, np.sqrt(np.e), np.e],
'x_lim': [0, 2],
'integral': (np.e**2 - 1.),
},
Logarithmic1D: {
'parameters': [1, 1],
'x_values': [1, np.e, np.e**2],
'y_values': [0, 1, 2],
'x_lim': [1, np.e**2],
'integral': (np.e**2 + 1),
}
}
# 2D Models
models_2D = {
Gaussian2D: {
'parameters': [1, 0, 0, 1, 1],
'constraints': {'fixed': {'theta': True}},
'x_values': [0, np.sqrt(2), -np.sqrt(2)],
'y_values': [0, np.sqrt(2), -np.sqrt(2)],
'z_values': [1, 1. / np.exp(1) ** 2, 1. / np.exp(1) ** 2],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 2 * np.pi,
'deriv_parameters': [137., 5.1, 5.4, 1.5, 2., np.pi/4],
'deriv_initial': [10, 5, 5, 4, 4, .5],
'bbox_peak': True
},
Const2D: {
'parameters': [1],
'x_values': [-1, 1, np.pi, -42., 0],
'y_values': [0, 1, 42, np.pi, -1],
'z_values': [1, 1, 1, 1, 1],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 400
},
Box2D: {
'parameters': [1, 0, 0, 10, 10],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [1, 1, 1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 100,
'bbox_peak': True
},
RickerWavelet2D: {
'parameters': [1, 0, 0, 1],
'x_values': [0, 0, 0, 0, 0, 1, -1, 3, -3],
'y_values': [0, 1, -1, 3, -3, 0, 0, 0, 0],
'z_values': [1.0, 0.303265, 0.303265, -0.038881, -0.038881,
0.303265, 0.303265, -0.038881, -0.038881],
'x_lim': [-10, 11],
'y_lim': [-10, 11],
'integral': 0
},
TrapezoidDisk2D: {
'parameters': [1, 0, 0, 1, 1],
'x_values': [0, 0.5, 0, 1.5],
'y_values': [0, 0.5, 1.5, 0],
'z_values': [1, 1, 0.5, 0.5],
'x_lim': [-3, 3],
'y_lim': [-3, 3],
'bbox_peak': True
},
AiryDisk2D: {
'parameters': [7, 0, 0, 10],
'x_values': [0, 1, -1, -0.5, -0.5],
'y_values': [0, -1, 0.5, 0.5, -0.5],
'z_values': [7., 6.50158267, 6.68490643, 6.87251093, 6.87251093],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'requires_scipy': True
},
Moffat2D: {
'parameters': [1, 0, 0, 1, 2],
'x_values': [0, 1, -1, 3, -3],
'y_values': [0, -1, 3, 1, -3],
'z_values': [1.0, 0.111111, 0.008264, 0.008264, 0.00277],
'x_lim': [-3, 3],
'y_lim': [-3, 3]
},
Polynomial2D: {
'parameters': {'degree': 1, 'c0_0': 1., 'c1_0': 1., 'c0_1': 1.},
'x_values': [1, 2, 3],
'y_values': [1, 3, 2],
'z_values': [3, 6, 6],
'x_lim': [1, 100],
'y_lim': [1, 100]
},
Disk2D: {
'parameters': [1, 0, 0, 5],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [0, 0, 1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': np.pi * 5 ** 2,
'bbox_peak': True
},
Ring2D: {
'parameters': [1, 0, 0, 5, 5],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [1, 1, 1, 1, 0, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': np.pi * (10 ** 2 - 5 ** 2),
'bbox_peak': True
},
Sersic2D: {
'parameters': [1, 25, 4, 50, 50, 0.5, -1],
'x_values': [0.0, 1, 10, 100],
'y_values': [1, 100, 0.0, 10],
'z_values': [1.686398e-02, 9.095221e-02, 2.341879e-02, 9.419231e-02],
'requires_scipy': True,
'x_lim': [1, 1e10],
'y_lim': [1, 1e10]
},
Planar2D: {
'parameters': [1, 1, 0],
'x_values': [0, np.pi, 42, -1],
'y_values': [np.pi, 0, -1, 42],
'z_values': [np.pi, np.pi, 41, 41],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 0
}
}
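# Illustrative sketch, not part of the original module: one way a test
# harness might consume an entry of ``models_1D``. The helper name and the
# tolerance are hypothetical.
def _check_model_1d(model_class, atol=1e-5):
    spec = models_1D[model_class]
    params = spec['parameters']
    # polynomials specify their coefficients via a dict, other models
    # pass their parameters positionally
    if isinstance(params, dict):
        model = model_class(**params)
    else:
        model = model_class(*params)
    y = model(np.array(spec['x_values']))
    return np.allclose(y, spec['y_values'], atol=atol)
# e.g. _check_model_1d(Gaussian1D) should return True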
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
# pylint: disable=invalid-name
import os.path
import unittest.mock as mk
from importlib.metadata import EntryPoint
from itertools import combinations
from unittest import mock
import numpy as np
import pytest
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import (
DogBoxLSQFitter, Fitter, FittingWithOutlierRemoval, JointFitter, LevMarLSQFitter,
LinearLSQFitter, LMLSQFitter, NonFiniteValueError, SimplexLSQFitter, SLSQPLSQFitter,
TRFLSQFitter, _NLLSQFitter, populate_entry_points)
from astropy.modeling.optimizers import Optimization
from astropy.stats import sigma_clip
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from . import irafutil
if HAS_SCIPY:
from scipy import optimize
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
non_linear_fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomial fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
self.z = poly2(self.x, self.y)
def test_poly2D_fitting(self):
fitter = LinearLSQFitter()
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
fitter = LinearLSQFitter()
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_nonlinear_fitting(self, fitter):
fitter = fitter()
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
@pytest.mark.skipif('not HAS_SCIPY')
def test_compare_nonlinear_fitting(self):
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
fit_models = []
for fitter in non_linear_fitters:
fitter = fitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_models.append(fitter(self.model, self.x, self.y, self.z))
for pair in combinations(fit_models, 2):
assert_allclose(pair[0].parameters, pair[1].parameters)
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
    Create a 2D polynomial (z) using Polynomial2D and default coefficients.
    Fit z using a Chebyshev2D model.
    Evaluate the Chebyshev2D polynomial and compare with the initial z.
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
128.])
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting_with_weights(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
weights = np.ones_like(self.y)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
"""
Tests the joint fitting routine using 2 gaussian models
"""
def setup_class(self):
"""
Create 2 gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
self.jf = JointFitter([self.g1, self.g2],
{self.g1: ['amplitude'],
self.g2: ['amplitude']}, [9.8])
self.x = np.arange(10, 20, .1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
Tests the fitting routine with similar procedure.
Compares the fitted parameters.
"""
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
model(p[0], p[3:], x2) - y2])
coeff, _ = optimize.leastsq(errfunc, p,
args=(self.x, self.ny1, self.x, self.ny2))
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
with pytest.raises(ValueError) as excinfo:
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
_ = fitter(init_model_comp, x, y)
assert "Model must be simple, not compound" in str(excinfo.value)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join('data',
'idcompspec.fits'))
with open(test_file) as f:
lines = f.read()
reclist = lines.split('begin')
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields['order'])
initial_model = models.Chebyshev1D(order - 1,
domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs),
rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5*x*x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5*x*x, -2*x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
y[0, 7] = 100. # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
mask=np.zeros_like([x, x]))
z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4. * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
@pytest.mark.parametrize('fitter0', non_linear_fitters)
@pytest.mark.parametrize('fitter1', non_linear_fitters)
def test_estimated_vs_analytic_deriv(self, fitter0, fitter1):
"""
Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and
analytic derivatives of a `Gaussian1D`.
"""
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize('fitter0', non_linear_fitters)
@pytest.mark.parametrize('fitter1', non_linear_fitters)
def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1):
"""
Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and
analytic derivatives of a `Gaussian1D`.
"""
weights = 1.0 / (self.ydata / 10.)
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_with_optimize(self, fitter):
"""
Tests results from `LevMarLSQFitter` and `TRFLSQFitter` against
`scipy.optimize.leastsq`.
"""
fitter = fitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(errfunc, self.initial_values,
args=(self.xdata, self.ydata))
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_with_weights(self, fitter):
"""
Tests results from `LevMarLSQFitter` and `TRFLSQFitter` with weights.
"""
fitter = fitter()
# part 1: weights are equal to 1
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=np.ones_like(self.xdata))
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.
mask = weights >= 1.
model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=weights)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(r'ignore:.* Maximum number of iterations reached')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter_class', fitters)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_fitter_against_LevMar(self, fitter_class, fitter):
"""
Tests results from non-linear fitters against `LevMarLSQFitter`
and `TRFLSQFitter`
"""
fitter = fitter()
fitter_cls = fitter_class()
# This emits a warning from fitter that we need to ignore with
# pytest.mark.filterwarnings above.
new_model = fitter_cls(self.gauss, self.xdata, self.ydata)
model = fitter(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters,
rtol=10 ** (-4))
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_LSQ_SLSQP_with_constraints(self, fitter):
"""
Runs `LevMarLSQFitter`/`TRFLSQFitter` and `SLSQPLSQFitter` on a
model with constraints.
"""
fitter = fitter()
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fslsqp = SLSQPLSQFitter()
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters,
rtol=10 ** (-4))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_non_linear_lsq_fitter_with_weights(self, fitter):
"""
Tests that issue #11581 has been solved.
"""
fitter = fitter()
np.random.seed(42)
norder = 2
fitter2 = LinearLSQFitter()
model = models.Polynomial1D(norder)
npts = 10000
c = [2.0, -10.0, 7.0]
tw = np.random.uniform(0.0, 10.0, npts)
tx = np.random.uniform(0.0, 10.0, npts)
ty = c[0] + c[1] * tx + c[2] * (tx ** 2)
ty += np.random.normal(0.0, 1.5, npts)
with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):
tf1 = fitter(model, tx, ty, weights=tw)
tf2 = fitter2(model, tx, ty, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters,
atol=10 ** (-16))
assert_allclose(tf1.parameters, c,
rtol=10 ** (-2), atol=10 ** (-2))
model = models.Gaussian1D()
if isinstance(fitter, TRFLSQFitter) or isinstance(fitter, LMLSQFitter):
with pytest.warns(AstropyUserWarning, match=r'The fit may be unsuccessful; *.'):
fitter(model, tx, ty, weights=tw)
else:
fitter(model, tx, ty, weights=tw)
model = models.Polynomial2D(norder)
nxpts = 100
nypts = 150
npts = nxpts * nypts
c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]
tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tz = c[0] + c[1] * tx + c[2] * (tx ** 2) + c[3] * ty + c[4] * (ty ** 2) + c[5] * tx * ty
tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)
with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):
tf1 = fitter(model, tx, ty, tz, weights=tw)
tf2 = fitter2(model, tx, ty, tz, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters,
atol=10 ** (-16))
assert_allclose(tf1.parameters, c,
rtol=10 ** (-2), atol=10 ** (-2))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x ** 2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0., 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_param_cov(self, fitter):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
fitter = fitter()
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covariance is
            # non-negligible
y = x*a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
X = np.vstack([x, np.ones(len(x))]).T
beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
s2 = (np.sum((y - np.matmul(X, beta).ravel())**2) /
(len(y) - len(beta)))
olscov = np.linalg.inv(np.matmul(X.T, X)) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(olscov, fitter.fit_info['param_cov'])
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
# This should fail as it raises an Import Error
raise ImportError
def returnbadfunc(self):
def badfunc():
# This should import but it should fail type check
pass
return badfunc
def returnbadclass(self):
        # This should import, but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
with pytest.warns(AstropyUserWarning, match=r".*ImportError.*"):
populate_entry_points([mock_entry_importerror])
def test_bad_func(self):
"""This returns a function which fails the type check"""
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
with pytest.warns(AstropyUserWarning, match=r".*Class.*"):
populate_entry_points([mock_entry_badfunc])
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter """
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
with pytest.warns(AstropyUserWarning, match=r".*BadClass.*"):
populate_entry_points([mock_entry_badclass])
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5., 5., 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
self.y = func(self.model_params, self.x)
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
y = self.y + (np.random.normal(0., 0.2, self.x.shape) +
c*np.random.normal(3.0, 5.0, self.x.shape))
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
fit = FittingWithOutlierRemoval(fitter, sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
0.5*(pos[1] - p[1])**2 / p[3]**2)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Computes the centroid of the data as the initial guess for the
        center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
z = self.z + (np.random.normal(0., 0.2, self.z.shape) +
c*np.random.normal(self.z, 2.0, self.z.shape))
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
y_mean=guess[2], x_stddev=0.75,
y_stddev=1.25)
fit = FittingWithOutlierRemoval(fitter, sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
x = np.arange(10)
y = np.array([2.5*x - 4, 2*x*x + x + 10])
y[1, 5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)
assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)
z[3, 3:5, 0] = 100. # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020 """
def setup_class(self):
# values of x,y not important as we fit y(x,y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0, 0] = 1000.0 # outlier
self.z[0, 1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x1d, self.z1d)
assert((~mask).sum() == self.z1d.size - 2)
assert(mask[0] and mask[1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2)) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""
smoke test for #7020 - fails without fitting.py
patch because weights does not propagate
"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_1d_set_with_weights_with_sigma_clip(self):
"""1D model set with separate weights"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
weights = np.array([self.weights1d, self.weights1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=weights)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x, self.y, self.z)
assert((~mask).sum() == self.z.size - 2)
assert(mask[0, 0] and mask[0, 1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_2d_with_weights_without_sigma_clip(self, fitter):
fitter = fitter()
model = models.Polynomial2D(0)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_linear_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
@pytest.mark.parametrize('base_fitter', non_linear_fitters)
def test_2d_with_weights_with_sigma_clip(self, base_fitter):
"""smoke test for #7020 - fails without fitting.py patch because
weights does not propagate"""
base_fitter = base_fitter()
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(base_fitter, sigma_clip,
niter=3, sigma=3.)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
def test_2d_linear_with_weights_with_sigma_clip(self):
"""same as test above with a linear fitter."""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_fitters_with_weights(fitter):
"""Issue #5737 """
fitter = fitter()
if isinstance(fitter, _NLLSQFitter):
pytest.xfail("This test is poorly designed and causes issues for "
"scipy.optimize.least_squares based fitters")
Xin, Yin = np.mgrid[0:21, 0:21]
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights():
"""Regression test for #7035"""
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights_flat():
"""Same as the above #7035 test but with flattened inputs"""
Xin, Yin = np.mgrid[0:21, 0:21]
Xin, Yin = Xin.flatten(), Yin.flatten()
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_fitters_interface(fitter):
"""
Test that ``**kwargs`` work with all optimizers.
This is a basic smoke test.
"""
fitter = fitter()
model = models.Gaussian1D(10, 4, .3)
x = np.arange(21)
y = model(x)
if isinstance(fitter, SimplexLSQFitter):
kwargs = {'maxiter': 77, 'verblevel': 1, 'acc': 1e-6}
else:
kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
if isinstance(fitter, LevMarLSQFitter) or isinstance(fitter, _NLLSQFitter):
kwargs.pop('verblevel')
_ = fitter(model, x, y, **kwargs)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter_class', [SLSQPLSQFitter, SimplexLSQFitter])
def test_optimizers(fitter_class):
fitter = fitter_class()
# Test maxiter
assert fitter._opt_method.maxiter == 100
fitter._opt_method.maxiter = 1000
assert fitter._opt_method.maxiter == 1000
# Test eps
assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps)
fitter._opt_method.eps = 1e-16
assert fitter._opt_method.eps == 1e-16
# Test acc
assert fitter._opt_method.acc == 1e-7
fitter._opt_method.acc = 1e-16
assert fitter._opt_method.acc == 1e-16
# Test repr
assert repr(fitter._opt_method) == f"{fitter._opt_method.__class__.__name__}()"
fitparams = mk.MagicMock()
final_func_val = mk.MagicMock()
numiter = mk.MagicMock()
funcalls = mk.MagicMock()
exit_mode = 1
mess = mk.MagicMock()
xtol = mk.MagicMock()
if fitter_class == SLSQPLSQFitter:
return_value = (fitparams, final_func_val, numiter, exit_mode, mess)
fit_info = {
'final_func_val': final_func_val,
'numiter': numiter,
'exit_mode': exit_mode,
'message': mess
}
else:
return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode)
fit_info = {
'final_func_val': final_func_val,
'numiter': numiter,
'exit_mode': exit_mode,
'num_function_calls': funcalls
}
with mk.patch.object(fitter._opt_method.__class__, 'opt_method',
return_value=return_value):
with pytest.warns(AstropyUserWarning, match=r"The fit may be unsuccessful; .*"):
assert (fitparams, fit_info) == fitter._opt_method(mk.MagicMock(), mk.MagicMock(),
mk.MagicMock(), xtol=xtol)
assert fit_info == fitter._opt_method.fit_info
    if isinstance(fitter, SLSQPLSQFitter):
        assert fitter._opt_method.acc == 1e-16
    else:
        assert fitter._opt_method.acc == xtol
@mk.patch.multiple(Optimization, __abstractmethods__=set())
def test_Optimization_abstract_call():
optimization = Optimization(mk.MagicMock())
with pytest.raises(NotImplementedError) as err:
optimization()
assert str(err.value) == "Subclasses should implement this method"
def test_fitting_with_outlier_removal_niter():
"""
Test that FittingWithOutlierRemoval stops prior to reaching niter if the
set of masked points has converged and correctly reports the actual number
of iterations performed.
"""
# 2 rows with some noise around a constant level and 1 deviant point:
x = np.arange(25)
with NumpyRNGContext(_RANDOM_SEED):
y = np.random.normal(loc=10., scale=1., size=(2, 25))
y[0, 14] = 100.
# Fit 2 models with up to 5 iterations (should only take 2):
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=5,
sigma_lower=3., sigma_upper=3., maxiters=1
)
model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
# Confirm that only the deviant point was rejected, in 2 iterations:
assert_equal(np.where(mask), [[0], [14]])
assert fitter.fit_info['niter'] == 2
# Refit just the first row without any rejection iterations, to ensure
# there are no regressions for that special case:
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=0,
sigma_lower=3., sigma_upper=3., maxiters=1
)
model, mask = fitter(models.Chebyshev1D(2), x, y[0])
# Confirm that there were no iterations or rejected points:
assert mask.sum() == 0
assert fitter.fit_info['niter'] == 0
@pytest.mark.skipif('not HAS_SCIPY')
class TestFittingUncertanties:
"""
Test that parameter covariance is calculated correctly for the fitters
that do so (currently LevMarLSQFitter, LinearLSQFitter).
"""
example_1D_models = [models.Polynomial1D(2), models.Linear1D()]
example_1D_sets = [models.Polynomial1D(2, n_models=2, model_set_axis=False),
models.Linear1D(n_models=2, slope=[1., 1.], intercept=[0, 0])]
def setup_class(self):
np.random.seed(619)
self.x = np.arange(10)
self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.rand_grid = np.random.random(100).reshape(10, 10)
self.rand = self.rand_grid[0]
@pytest.mark.parametrize(('single_model', 'model_set'),
list(zip(example_1D_models, example_1D_sets)))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_1d_models(self, single_model, model_set, fitter):
""" Test that fitting uncertainties are computed correctly for 1D models
and 1D model sets. Use covariance/stds given by LevMarLSQFitter as
a benchmark since they are returned by the numpy fitter.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
# test 1D single models
# fit single model w/ nonlinear fitter
y = single_model(self.x) + self.rand
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_model = fitter(single_model, self.x, y)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x, y)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
        # check that covariance and stds are computed correctly
assert_allclose(cov_model_linlsq, cov_model)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
fit_model_linlsq.stds.stds)
# now test 1D model sets
# fit set of models w/ linear fitter
y = model_set(self.x, model_set_axis=False) + np.array([self.rand, self.rand])
fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)
cov_1d_set_linlsq = [j.cov_matrix for j in
fit_1d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_1d_set_linlsq[0], cov_model)
assert_allclose(np.sqrt(np.diag(cov_1d_set_linlsq[0])),
fit_1d_set_linlsq.stds[0].stds)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_2d_models(self, fitter):
"""
        Test that fitting uncertainties are computed correctly for 2D models
        and 2D model sets, using the covariance/stds returned by the
        numpy-based `LinearLSQFitter` as a benchmark.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
single_model = models.Polynomial2D(2, c0_0=2)
model_set = models.Polynomial2D(degree=2, n_models=2, c0_0=[2, 3],
model_set_axis=False)
# fit single model w/ nonlinear fitter
z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_model = fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ nonlinear fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x_grid,
self.y_grid, z_grid)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
assert_allclose(cov_model, cov_model_linlsq)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
fit_model_linlsq.stds.stds)
# fit 2d model set
z_grid = model_set(self.x_grid, self.y_grid) + np.array((self.rand_grid,
self.rand_grid))
fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid,
z_grid)
cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_2d_set_linlsq[0], cov_model)
assert_allclose(np.sqrt(np.diag(cov_2d_set_linlsq[0])),
fit_2d_set_linlsq.stds[0].stds)
def test_covariance_std_printing_indexing(self, capsys):
"""
Test printing methods and indexing.
"""
# test str representation for Covariance/stds
fitter = LinearLSQFitter(calc_uncertainties=True)
mod = models.Linear1D()
fit_mod = fitter(mod, self.x, mod(self.x)+self.rand)
print(fit_mod.cov_matrix)
captured = capsys.readouterr()
assert "slope | 0.001" in captured.out
assert "intercept| -0.005, 0.03" in captured.out
print(fit_mod.stds)
captured = capsys.readouterr()
assert "slope | 0.032" in captured.out
assert "intercept| 0.173" in captured.out
# test 'pprint' for Covariance/stds
print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))
captured = capsys.readouterr()
assert "slope | 0.00105" in captured.out
assert "intercept" not in captured.out
print(fit_mod.stds.pprint(max_lines=1, round_val=5))
captured = capsys.readouterr()
assert "slope | 0.03241" in captured.out
assert "intercept" not in captured.out
# test indexing for Covariance class.
assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix['slope', 'slope']
# test indexing for stds class.
assert fit_mod.stds[1] == fit_mod.stds['intercept']
@pytest.mark.skipif('not HAS_SCIPY')
def test_non_finite_filter():
"""Regression test filter introduced to solve issues #3575 and #12809"""
x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])
m_init = models.Gaussian1D()
fit = LevMarLSQFitter()
    # The fit raises an error (rather than silently failing) because the
    # inputs contain non-finite values
with pytest.raises(NonFiniteValueError, match=r"Objective function has encountered.*"):
fit(m_init, x, y)
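# Illustrative sketch, not part of the original test suite: a caller can work
# around non-finite inputs by masking them out before fitting. The helper
# below is hypothetical.
def _fit_ignoring_non_finite(model, x, y, fitter=None):
    fitter = fitter if fitter is not None else LevMarLSQFitter()
    good = np.isfinite(x) & np.isfinite(y)  # keep only finite samples
    return fitter(model, x[good], y[good])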
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal, assert_array_less
from astropy import units as u
from astropy.coordinates import Angle
from astropy.modeling import InputParameterError, fitting, models
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
from astropy.utils.exceptions import AstropyUserWarning
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter
]
def test_sigma_constant():
"""
Test that the GAUSSIAN_SIGMA_TO_FWHM constant matches the
gaussian_sigma_to_fwhm constant in astropy.stats. We define
it manually in astropy.modeling to avoid importing from
astropy.stats.
"""
from astropy.modeling.functional_models import GAUSSIAN_SIGMA_TO_FWHM
from astropy.stats.funcs import gaussian_sigma_to_fwhm
assert gaussian_sigma_to_fwhm == GAUSSIAN_SIGMA_TO_FWHM
def test_Trapezoid1D():
"""Regression test for https://github.com/astropy/astropy/issues/1721"""
model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)
xx = np.linspace(0, 4, 8)
yy = model(xx)
yy_ref = [0., 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.]
assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)
def test_Gaussian1D():
model = models.Gaussian1D(4.2, 1.7, stddev=5.1)
x = np.mgrid[0:5]
g = model(x)
g_ref = [3.97302977, 4.16062403, 4.19273985, 4.06574509, 3.79389376]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose(model.fwhm, 12.009582229657841)
def test_Gaussian2D():
"""
Test rotated elliptical Gaussian2D model.
https://github.com/astropy/astropy/pull/2038
"""
model = models.Gaussian2D(4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3,
theta=np.pi/6.)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],
[3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],
[3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],
[3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],
[3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose([model.x_fwhm, model.y_fwhm],
[12.009582229657841, 7.7709061486021325])
def test_Gaussian2DCovariance():
"""
Test rotated elliptical Gaussian2D model when cov_matrix is input.
https://github.com/astropy/astropy/pull/2199
"""
cov_matrix = [[49., -16.], [-16., 9.]]
model = models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],
[8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],
[13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],
[16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],
[14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
# Test bad cov_matrix shape
cov_matrix = [[49., 3.14, -16.],
[3.14, -16., 9.],
[-16, 27, 3.14]]
with pytest.raises(ValueError) as err:
models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
assert str(err.value) == "Covariance matrix must be 2x2"
def test_Gaussian2DRotation():
amplitude = 42
x_mean, y_mean = 0, 0
x_stddev, y_stddev = 2, 3
theta = Angle(10, 'deg')
pars = dict(amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev)
rotation = models.Rotation2D(angle=theta.degree)
point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev)
point2 = rotation(*point1)
g1 = models.Gaussian2D(theta=0, **pars)
g2 = models.Gaussian2D(theta=theta.radian, **pars)
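    # the rotated Gaussian evaluated at the rotated point must equal the
    # unrotated Gaussian evaluated at the original point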
value1 = g1(*point1)
value2 = g2(*point2)
assert_allclose(value1, value2)
def test_Gaussian2D_invalid_inputs():
x_stddev = 5.1
y_stddev = 3.3
theta = 10
cov_matrix = [[49., -16.], [-16., 9.]]
# first make sure the valid ones are OK
models.Gaussian2D()
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None)
models.Gaussian2D(cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(theta=0, cov_matrix=cov_matrix)
def test_Gaussian2D_theta():
theta = Angle(90, 'deg')
model1 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta)
theta2 = np.pi / 2.
model2 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta2)
assert model1.theta.quantity.to('radian').value == model2.theta.value
assert model1.bounding_box == model2.bounding_box
assert model1(619.42, 31.314) == model2(619.42, 31.314)
@pytest.mark.parametrize('gamma', (10, -10))
def test_moffat_fwhm(gamma):
ans = 34.641016151377542
kwargs = {'gamma': gamma, 'alpha': 0.5}
m1 = models.Moffat1D(**kwargs)
m2 = models.Moffat2D(**kwargs)
assert_allclose([m1.fwhm, m2.fwhm], ans)
assert_array_less(0, [m1.fwhm, m2.fwhm])
def test_RedshiftScaleFactor():
"""Like ``test_ScaleModel()``."""
# Scale by a scalar
m = models.RedshiftScaleFactor(0.4)
assert m(0) == 0
assert_array_equal(m([1, 2]), [1.4, 2.8])
assert_allclose(m.inverse(m([1, 2])), [1, 2])
# Scale by a list
m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)
assert_array_equal(m(0), 0)
assert_array_equal(m([1, 2], model_set_axis=False),
[[0.5, 1], [1, 2], [1.5, 3]])
assert_allclose(m.inverse(m([1, 2], model_set_axis=False)),
[[1, 2], [1, 2], [1, 2]])
def test_RedshiftScaleFactor_inverse():
m = models.RedshiftScaleFactor(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_RedshiftScaleFactor_inverse_bounding_box():
model = models.RedshiftScaleFactor(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
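    # RedshiftScaleFactor(2) maps x -> (1 + z) * x = 3 * x, so the inverse's
    # valid input range is the forward bounding box scaled by 3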
assert inverse_model.bounding_box == (3, 15)
assert_allclose(inverse_model(model(4, with_bounding_box=True), with_bounding_box=True), 4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_RedshiftScaleFactor_model_levmar_fit():
"""Test fitting RedshiftScaleFactor model with LevMarLSQFitter."""
init_model = models.RedshiftScaleFactor()
x = np.arange(10)
y = 2.7174 * x
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [1.7174])
def test_Ellipse2D():
"""Test Ellipse2D model."""
amplitude = 7.5
x0, y0 = 15, 15
theta = Angle(45, 'deg')
em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)
y, x = np.mgrid[0:30, 0:30]
e = em(x, y)
assert np.all(e[e > 0] == amplitude)
assert e[y0, x0] == amplitude
rotation = models.Rotation2D(angle=theta.degree)
point1 = [2, 0] # Rotation2D center is (0, 0)
point2 = rotation(*point1)
point1 = np.array(point1) + [x0, y0]
point2 = np.array(point2) + [x0, y0]
e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.)
e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)
assert e1(*point1) == e2(*point2)
def test_Ellipse2D_circular():
"""Test that circular Ellipse2D agrees with Disk2D [3736]."""
amplitude = 7.5
radius = 10
size = (radius * 2) + 1
y, x = np.mgrid[0:size, 0:size]
ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius,
theta=0)(x, y)
disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)
assert np.all(ellipse == disk)
def test_Ellipse2D_theta():
theta = Angle(90, 'deg')
model1 = models.Ellipse2D(1, 25, 25, 15, 5, theta=theta)
theta2 = np.pi / 2.
model2 = models.Ellipse2D(1, 25, 25, 15, 5, theta=theta2)
assert model1.theta.quantity.to('radian').value == model2.theta.value
assert model1.bounding_box == model2.bounding_box
assert model1(619.42, 31.314) == model2(619.42, 31.314)
def test_Scale_inverse():
m = models.Scale(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Scale_inverse_bounding_box():
model = models.Scale(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (2, 10)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
def test_Multiply_inverse():
m = models.Multiply(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Multiply_inverse_bounding_box():
model = models.Multiply(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (2, 10)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
def test_Shift_inverse():
m = models.Shift(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Shift_inverse_bounding_box():
model = models.Shift(10)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (11, 15)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_Shift_model_levmar_fit(fitter):
"""Test fitting Shift model with LevMarLSQFitter (issue #6103)."""
fitter = fitter()
init_model = models.Shift()
x = np.arange(10)
y = x + 0.1
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)
def test_Shift_model_set_linear_fit():
"""Test linear fitting of Shift model (issue #6103)."""
init_model = models.Shift(offset=[0, 0], n_models=2)
x = np.arange(10)
yy = np.array([x+0.1, x-0.2])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_set_linear_fit(Model):
"""Test linear fitting of Scale model (#6103)."""
init_model = Model(factor=[0, 0], n_models=2)
x = np.arange(-3, 7)
yy = np.array([1.15*x, 0.96*x])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_evaluate_without_units(Model):
m = Model(factor=4*u.m)
kwargs = {'x': 3*u.m, 'y': 7*u.m}
mnu = m.without_units_for_data(**kwargs)
x = np.linspace(-1, 1, 100)
assert_allclose(mnu(x), 4*x)
# https://github.com/astropy/astropy/issues/6178
def test_Ring2D_rout():
# Test with none of r_in, r_out, width specified
m = models.Ring2D(amplitude=1, x_0=1, y_0=1)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 1
# Test with r_in specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=4)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 4
assert m.width.value == 1
# Test with r_out specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=7)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 6
# Error when r_out is too small for default r_in
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=0.5)
assert str(err.value) == "r_in=1 and width=-0.5 must both be >=0"
# Test with width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, width=11)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 11
# Test with r_in and r_out specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 2
assert m.width.value == 3
# Error when r_out is smaller than r_in
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, r_in=4)
assert str(err.value) == "r_in=4 and width=-3 must both be >=0"
# Test with r_in and width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, width=4)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 2
assert m.width.value == 4
# Test with r_out and width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=12, width=7)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 5
assert m.width.value == 7
# Error when width is larger than r_out
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, width=4)
assert str(err.value) == "r_in=-3 and width=4 must both be >=0"
# Test with r_in, r_out, and width all specified
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=8)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 3
assert m.width.value == 8
# error when specifying all
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=7)
assert str(err.value) == "Width must be r_out - r_in"
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('fitter', fitters)
def test_Voigt1D(fitter):
fitter = fitter()
voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
xarr = np.linspace(-5.0, 5.0, num=40)
yarr = voi(xarr)
voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
voi_fit = fitter(voi_init, xarr, yarr)
assert_allclose(voi_fit.param_sets, voi.param_sets)
# Invalid method
with pytest.raises(ValueError) as err:
models.Voigt1D(method='test')
assert str(err.value) == "Not a valid method for Voigt1D Faddeeva function: test."
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('algorithm', ('humlicek2', 'wofz'))
def test_Voigt1D_norm(algorithm):
"""Test integral of normalized Voigt profile."""
from scipy.integrate import quad
voi = models.Voigt1D(amplitude_L=1.0/np.pi, x_0=0.0, fwhm_L=2.0, fwhm_G=1.5, method=algorithm)
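    # with amplitude_L = 2 / (pi * fwhm_L) (here 1/pi for fwhm_L=2) the
    # profile is area-normalized, so the integral below should be 1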
if algorithm == 'wofz':
atol = 1e-14
else:
atol = 1e-8
assert_allclose(quad(voi, -np.inf, np.inf)[0], 1.0, atol=atol)
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('doppler', (1.e-3, 1.e-2, 0.1, 0.5, 1.0, 2.5, 5.0, 10))
def test_Voigt1D_hum2(doppler):
"""Verify accuracy of Voigt profile in Humlicek approximation to Faddeeva.cc (SciPy)."""
x = np.linspace(-20, 20, 400001)
voi_w = models.Voigt1D(amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler, method='wofz')
vf_w = voi_w(x)
dvda_w = voi_w.fit_deriv(x, x_0=0, amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler)
voi_h = models.Voigt1D(amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler, method='humlicek2')
vf_h = voi_h(x)
dvda_h = voi_h.fit_deriv(x, x_0=0, amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler)
assert_allclose(vf_h, vf_w, rtol=1e-7 * (2 + 1 / np.sqrt(doppler)))
assert_allclose(dvda_h, dvda_w, rtol=1e-9, atol=1e-7 * (1 + 30 / doppler))
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('fitter', fitters)
def test_KingProjectedAnalytic1D_fit(fitter):
fitter = fitter()
km = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=2)
xarr = np.linspace(0.1, 2, 10)
yarr = km(xarr)
km_init = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=1)
km_fit = fitter(km_init, xarr, yarr)
assert_allclose(km_fit.param_sets, km.param_sets)
assert_allclose(km_fit.concentration, 0.30102999566398136)
@pytest.mark.parametrize('model', [models.Exponential1D(), models.Logarithmic1D()])
def test_ExponentialAndLogarithmic1D_fit(model):
xarr = np.linspace(0.1, 10., 200)
assert_allclose(xarr, model.inverse(model(xarr)))
@pytest.mark.parametrize('model', [models.Exponential1D(), models.Logarithmic1D()])
def test_ExponentialAndLogarithmic_set_tau(model):
message = "0 is not an allowed value for tau"
with pytest.raises(ValueError) as err:
model.tau = 0
assert str(err.value) == message
def test_Linear1D_inverse():
model = models.Linear1D(slope=4, intercept=-12)
inverse = model.inverse
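    # inverting y = 4*x - 12 gives x = (y + 12) / 4 = y/4 + 3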
assert inverse.slope == 1/4
assert inverse.intercept == 3
@pytest.mark.parametrize('trig', [(models.Sine1D, [-0.25, 0.25]),
(models.ArcSine1D, [-0.25, 0.25]),
(models.Cosine1D, [0, 0.5]),
(models.ArcCosine1D, [0, 0.5]),
(models.Tangent1D, [-0.25, 0.25]),
(models.ArcTangent1D, [-0.25, 0.25])])
def test_trig_inverse(trig):
mdl = trig[0]()
lower, upper = trig[1]
x = np.arange(lower, upper, 0.01)
assert_allclose(mdl.inverse(mdl(x)), x, atol=1e-10)
assert_allclose(mdl(mdl.inverse(x)), x, atol=1e-10)
@pytest.mark.skipif('not HAS_SCIPY')
def test_Sersic2D_theta():
theta = Angle(90, 'deg')
model1 = models.Sersic2D(1, 5, 4, 25, 25, 0.5, theta=theta)
theta2 = np.pi / 2.
model2 = models.Sersic2D(1, 5, 4, 25, 25, 0.5, theta=theta2)
assert model1.theta.quantity.to('radian').value == model2.theta.value
assert model1(619.42, 31.314) == model2(619.42, 31.314)
|
51ab6aecc2e952e1179f0bf28826ebfcf286e02dd90664e4d2b70f5def12597d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
from inspect import Parameter
import numpy as np
import pytest
from astropy.modeling.utils import (
_SpecialOperatorsDict, _validate_domain_window, get_inputs_and_params, poly_map_domain)
def test_poly_map_domain():
oldx = np.array([1, 2, 3, 4])
# test shift/scale
assert (poly_map_domain(oldx, (-4, 4), (-3, 3)) == [0.75, 1.5, 2.25, 3]).all()
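    # poly_map_domain applies the linear map
    #   new = window[0] + (oldx - domain[0]) * (window[1] - window[0]) / (domain[1] - domain[0])
    # which here reduces to new = 0.75 * oldx, matching the expected values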
# errors
MESSAGE = 'Expected "domain" and "window" to be a tuple of size 2.'
with pytest.raises(ValueError) as err:
poly_map_domain(oldx, (-4,), (-3, 3))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
poly_map_domain(oldx, (-4, 4, -4), (-3, 3))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
poly_map_domain(oldx, (-4, 4), (-3,))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
poly_map_domain(oldx, (-4, 4), (-3, 3, -3))
assert str(err.value) == MESSAGE
def test__validate_domain_window():
# Test if None
assert _validate_domain_window(None) is None
# Test normal
assert _validate_domain_window((-2, 2)) == (-2, 2)
assert _validate_domain_window([-2, 2]) == (-2, 2)
assert _validate_domain_window(np.array([-2, 2])) == (-2, 2)
# Test error
MESSAGE = 'domain and window should be tuples of size 2.'
with pytest.raises(ValueError) as err:
_validate_domain_window((-2, 2, -2))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
_validate_domain_window((-2,))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
_validate_domain_window([-2])
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
_validate_domain_window(np.array([-2]))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
_validate_domain_window(-2)
assert str(err.value) == MESSAGE
def test_get_inputs_and_params():
# test normal
def func1(input0, input1, param0=5, param1=7):
pass
inputs, params = get_inputs_and_params(func1)
for index, _input in enumerate(inputs):
assert isinstance(_input, Parameter)
assert _input.name == f"input{index}"
assert _input.kind == _input.POSITIONAL_OR_KEYWORD
assert _input.default == Parameter.empty
default = [5, 7]
for index, param in enumerate(params):
assert isinstance(param, Parameter)
assert param.name == f"param{index}"
assert param.kind == param.POSITIONAL_OR_KEYWORD
assert param.default == default[index]
# Error
MESSAGE = "Signature must not have *args or **kwargs"
def func2(input0, input1, *args, param0=5, param1=7):
pass
def func3(input0, input1, param0=5, param1=7, **kwargs):
pass
with pytest.raises(ValueError) as err:
get_inputs_and_params(func2)
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
get_inputs_and_params(func3)
assert str(err.value) == MESSAGE
class Test_SpecialOperatorsDict:
def setup(self):
self.key = 'test'
self.val = 'value'
def test__set_value(self):
special_operators = _SpecialOperatorsDict()
assert self.key not in special_operators
special_operators._set_value(self.key, self.val)
assert self.key in special_operators
assert special_operators[self.key] == self.val
with pytest.raises(ValueError, match='Special operator "test" already exists'):
special_operators._set_value(self.key, self.val)
def test___setitem__(self):
special_operators = _SpecialOperatorsDict()
assert self.key not in special_operators
with pytest.deprecated_call():
special_operators[self.key] = self.val
assert self.key in special_operators
assert special_operators[self.key] == self.val
def test__SpecialOperatorsDict__get_unique_id(self):
special_operators = _SpecialOperatorsDict()
assert special_operators._unique_id == 0
assert special_operators._get_unique_id() == 1
assert special_operators._unique_id == 1
assert special_operators._get_unique_id() == 2
assert special_operators._unique_id == 2
assert special_operators._get_unique_id() == 3
assert special_operators._unique_id == 3
def test__SpecialOperatorsDict_add(self):
special_operators = _SpecialOperatorsDict()
operator_name = 'test'
operator = 'operator'
key0 = special_operators.add(operator_name, operator)
assert key0 == (operator_name, special_operators._unique_id)
assert key0 in special_operators
assert special_operators[key0] == operator
key1 = special_operators.add(operator_name, operator)
assert key1 == (operator_name, special_operators._unique_id)
assert key1 in special_operators
assert special_operators[key1] == operator
assert key0 != key1
|
80c12ef4a9b1c9ccece5db2c78c8176d6390c047ac8ddf35ab8b330528f9b7e7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import unittest.mock as mk
import numpy as np
# pylint: disable=invalid-name, no-member
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy.modeling.tabular as tabular_models
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import FittableModel, Model, _ModelMeta
from astropy.modeling.models import Gaussian2D
from astropy.modeling.parameters import InputParameterError, Parameter
from astropy.modeling.polynomial import PolynomialBase
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D, PowerLaw1D,
SmoothlyBrokenPowerLaw1D)
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
from .example_models import models_1D, models_2D
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter
]
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_custom_model(fitter, amplitude=4, frequency=1):
fitter = fitter()
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
Jacobian of model function, e.g. derivative of the function with
respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
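        # one row of derivatives per parameter, ordered like the model
        # parameters (amplitude, frequency) -> shape (2, len(x))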
return np.vstack((da, df))
SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
sin_model.evaluate(x, 5., 2.)
sin_model.fit_deriv(x, 5., 2.)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
model = fitter(sin_model, x, data)
    assert np.all(np.abs(np.array([model.amplitude.value, model.frequency.value])
                         - np.array([amplitude, frequency])) < 0.001)
def test_custom_model_init():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2., frequency=0.5)
assert sin_model.amplitude == 2.
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_inconsistent_input_shapes():
g = Gaussian2D()
x = np.arange(-1., 1, .2)
y = x.copy()
# check scalar input broadcasting works
assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0
    # and that broadcasting of differently shaped arrays works
x.shape = (10, 1)
y.shape = (1, 10)
result = g(x, y)
assert result.shape == (10, 10)
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox.bounding_box()
dz, dy, dx = np.diff(bbox) / 2
z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2, z2, with_bounding_box=True)
sub_arr = model(x1, y1, z1, with_bounding_box=True)
# check for flux agreement
assert abs(np.nansum(arr) - np.nansum(sub_arr)) < np.nansum(arr) * 1e-7
class Fittable2DModelTester:
"""
Test class for all two dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions, checks that it gives the correct values, and tests whether the
    model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
z = test_parameters['z_values']
assert np.all(np.abs(model(x, y) - z) < self.eval_error)
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
        # test the exception when dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
return
ddx = 0.01
ylim, xlim = bbox
x1 = np.arange(xlim[0], xlim[1], ddx)
y1 = np.arange(ylim[0], ylim[1], ddx)
x2 = np.concatenate(([xlim[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[xlim[1] + idx * ddx for idx in range(1, 10)]))
y2 = np.concatenate(([ylim[0] - idx * ddx for idx in range(10, 0, -1)],
y1,
[ylim[1] + idx * ddx for idx in range(1, 10)]))
inside_bbox = model(x1, y1)
outside_bbox = model(x2, y2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box2D_peak(self, model_class, test_parameters):
if not test_parameters.pop('bbox_peak', False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
ylim, xlim = bbox
dy, dx = np.diff(bbox)/2
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fitter2D(self, model_class, test_parameters, fitter):
"""Test if the parametric model works with the fitter."""
fitter = fitter()
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.N)
xv, yv = np.meshgrid(x, y)
np.random.seed(0)
# add 10% noise to the amplitude
noise = np.random.rand(self.N, self.N) - 0.5
data = model(xv, yv) + 0.1 * parameters[0] * noise
new_model = fitter(model, xv, yv, data)
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
assert_allclose(fitted, expected,
atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_deriv_2D(self, model_class, test_parameters, fitter):
"""
Test the derivative of a model by fitting with an estimated and
analytical derivative.
"""
fitter = fitter()
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
x_test = np.logspace(x_lim[0], x_lim[1], self.N*10)
y_test = np.logspace(y_lim[0], y_lim[1], self.M*10)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
x_test = np.linspace(x_lim[0], x_lim[1], self.N*10)
y_test = np.linspace(y_lim[0], y_lim[1], self.M*10)
xv, yv = np.meshgrid(x, y)
xv_test, yv_test = np.meshgrid(x_test, y_test)
try:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
except KeyError:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.default_rng(0)
amplitude = test_parameters['parameters'][0]
n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5)
data = model(xv, yv) + n
fitter_with_deriv = fitter
new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv,
data)
fitter_no_deriv = fitter
new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv(xv_test, yv_test),
new_model_no_deriv(xv_test, yv_test),
rtol=1e-2)
if model_class != Gaussian2D:
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters,
rtol=0.1)
class Fittable1DModelTester:
"""
Test class for all one dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions, checks that it gives the correct values, and tests whether the
    model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
    # These models will fail the fitting test because the built-in fitting
    # data produce non-finite values
_non_finite_models = [
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
SmoothlyBrokenPowerLaw1D
]
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.11
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
def test_input1D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x)
model(self.x1)
model(self.x2)
def test_eval1D(self, model_class, test_parameters):
"""
Test model values at certain given points
"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
assert_allclose(model(x), y, atol=self.eval_error)
def test_bounding_box1D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = (-5, 5)
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
del model.bounding_box
# test exception if dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = 5
try:
bbox = model.bounding_box.bounding_box()
except NotImplementedError:
return
ddx = 0.01
x1 = np.arange(bbox[0], bbox[1], ddx)
x2 = np.concatenate(([bbox[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[bbox[1] + idx * ddx for idx in range(1, 10)]))
inside_bbox = model(x1)
outside_bbox = model(x2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box1D_peak(self, model_class, test_parameters):
if not test_parameters.pop('bbox_peak', False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
        if isinstance(model, (models.Lorentz1D, models.Drude1D)):
rtol = 0.01 # 1% agreement is enough due to very extended wings
ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak
else:
rtol = 1e-7
ddx = 1
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
dx = np.diff(bbox) / 2
x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
arr = model(x2)
sub_arr = model(x1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fitter1D(self, model_class, test_parameters, fitter):
"""
Test if the parametric model works with the fitter.
"""
fitter = fitter()
x_lim = test_parameters['x_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
np.random.seed(0)
        # add 1% relative noise to the data
        relative_noise_amplitude = 0.01
data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) *
model(x))
new_model = fitter(model, x, data)
# Only check parameters that were free in the fit
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
if model_class == models.BrokenPowerLaw1D:
atol = 3.75
else:
atol = self.fit_error
assert_allclose(fitted, expected, atol=atol)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.parametrize('fitter', fitters)
def test_deriv_1D(self, model_class, test_parameters, fitter):
"""
Test the derivative of a model by comparing results with an estimated
derivative.
"""
fitter = fitter()
if model_class in self._non_finite_models:
return
x_lim = test_parameters['x_lim']
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
parameters = test_parameters['parameters']
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
rsn_rand_1234567890 = np.array([
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890])
n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitter
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitter
new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters, atol=0.15)
def create_model(model_class, test_parameters, use_constraints=True,
parameter_key='parameters'):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if 'constraints' in test_parameters:
constraints = test_parameters['constraints']
return model_class(*test_parameters[parameter_key], **constraints)
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_1D.items(), key=lambda x: str(x[0])))
class TestFittable1DModels(Fittable1DModelTester):
pass
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_2D.items(), key=lambda x: str(x[0])))
class TestFittable2DModels(Fittable2DModelTester):
pass
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
assert_equal(m(0), [42, 43])
assert_equal(m([1, 2], model_set_axis=False),
[[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
assert_equal(m(0), [0, 0])
assert_equal(m([1, 2], model_set_axis=False),
[[42, 84], [43, 86]])
def test_voigt_model():
"""
Currently just tests that the model peaks at its origin.
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>'
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0., .7, 1.4, 2.1, 3.9]
ans1 = [1., 7.3, 6.8, 6.3, 1.8]
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]
with pytest.raises(ValueError):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False,
fill_value=None)
assert_allclose(model(xextrap),
[1., 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,
fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))
assert_quantity_allclose(model(np.arange(5)),
[100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_interp_2d():
table = np.array([
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0., .7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array(
[-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
with pytest.raises(ValueError):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
with pytest.raises(ValueError):
model = LookupTable(lookup_table=[1, 2, 3])
with pytest.raises(NotImplementedError):
model = LookupTable(n_models=2)
with pytest.raises(ValueError):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
with pytest.raises(ValueError):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
with pytest.raises(ValueError):
model = LookupTable(points, table, bounds_error=False,
fill_value=1*u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
with pytest.raises(ValueError):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
    its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
    assert p(1) == p(1, with_bounding_box=True)
t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
[[np.nan, 11], [np.nan, 14], [np.nan, 4]])
trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_with_bounding_box():
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t(1, with_bounding_box=True)
assert result == 3.4
assert t.inverse(result, with_bounding_box=True) == 1.
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_bounding_box_with_units():
points = np.arange(5)*u.pix
lt = np.arange(5)*u.AA
t = models.Tabular1D(points, lt)
result = t(1*u.pix, with_bounding_box=True)
assert result == 1.*u.AA
assert t.inverse(result, with_bounding_box=True) == 1*u.pix
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular1d_inverse():
"""Test that the Tabular1D inverse is defined"""
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t.inverse((3.4, 6.7))
assert_allclose(result, np.array((1., 2.)))
# Check that it works for descending values in lookup_table
t2 = models.Tabular1D(points, values[::-1])
assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])
result2 = t2.inverse((7, 6.7))
assert_allclose(result2, np.array((1., 2.)))
# Check that it errors on double-valued lookup_table
points = np.arange(5)
values = np.array([1.5, 3.4, 3.4, 32, 25])
t = models.Tabular1D(points, values)
with pytest.raises(NotImplementedError):
t.inverse((3.4, 7.))
# Check that Tabular2D.inverse raises an error
table = np.arange(5*5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t3 = models.Tabular2D(points=points, lookup_table=table)
with pytest.raises(NotImplementedError):
t3.inverse((3, 3))
# Check that it uses the same kwargs as the original model
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
with pytest.raises(ValueError):
t.inverse(100)
t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
result = t.inverse(100)
assert_allclose(t(result), 100)
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_grid_shape_mismatch_error():
points = np.arange(5)
lt = np.mgrid[0:5, 0:5][0]
with pytest.raises(ValueError) as err:
models.Tabular2D(points, lt)
assert str(err.value) == "Expected grid points in 2 directions, got 5."
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_repr():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert repr(t) == "<Tabular1D(points=(array([0, 1, 2, 3, 4]),), lookup_table=[0 1 2 3 4])>"
table = np.arange(5*5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert repr(t) == (
"<Tabular2D(points=(array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])), "
"lookup_table=[[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]])>"
)
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_str():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert str(t) == (
"Model: Tabular1D\n"
"N_inputs: 1\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]),)\n"
" lookup_table: [0 1 2 3 4]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
table = np.arange(5*5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert str(t) == (
"Model: Tabular2D\n"
"N_inputs: 2\n"
"N_outputs: 1\n"
"Parameters: \n"
" points: (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4]))\n"
" lookup_table: [[ 0 1 2 3 4]\n"
" [ 5 6 7 8 9]\n"
" [10 11 12 13 14]\n"
" [15 16 17 18 19]\n"
" [20 21 22 23 24]]\n"
" method: linear\n"
" fill_value: nan\n"
" bounds_error: True"
)
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_evaluate():
points = np.arange(5)
lt = np.arange(5)[::-1]
t = models.Tabular1D(points, lt)
assert (t.evaluate([1, 2, 3]) == [3, 2, 1]).all()
assert (t.evaluate(np.array([1, 2, 3]) * u.m) == [3, 2, 1]).all()
t.n_outputs = 2
value = [np.array([3, 2, 1]), np.array([1, 2, 3])]
with mk.patch.object(tabular_models, 'interpn', autospec=True, return_value=value) as mkInterpn:
outputs = t.evaluate([1, 2, 3])
for index, output in enumerate(outputs):
assert np.all(value[index] == output)
assert mkInterpn.call_count == 1
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_module_name():
"""
The module name must be set manually because
these classes are created dynamically.
"""
for model in [models.Tabular1D, models.Tabular2D]:
assert model.__module__ == "astropy.modeling.tabular"
class classmodel(FittableModel):
f = Parameter(default=1)
x = Parameter(default=0)
y = Parameter(default=2)
def __init__(self, f=f.default, x=x.default, y=y.default):
super().__init__(f, x, y)
def evaluate(self):
pass
class subclassmodel(classmodel):
f = Parameter(default=3, fixed=True)
x = Parameter(default=10)
y = Parameter(default=12)
h = Parameter(default=5)
def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default):
super().__init__(f, x, y)
def evaluate(self):
pass
def test_parameter_inheritance():
b = subclassmodel()
assert b.param_names == ('f', 'x', 'y', 'h')
assert b.h == 5
assert b.f == 3
assert b.f.fixed == True # noqa: E712
def test_parameter_description():
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
model = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
assert model.amplitude_L._description == "The Lorentzian amplitude"
assert model.fwhm_L._description == "The Lorentzian full width at half maximum"
assert model.fwhm_G._description == "The Gaussian full width at half maximum"
def test_SmoothlyBrokenPowerLaw1D_validators():
with pytest.raises(InputParameterError) as err:
SmoothlyBrokenPowerLaw1D(amplitude=-1)
assert str(err.value) == "amplitude parameter must be > 0"
with pytest.raises(InputParameterError) as err:
SmoothlyBrokenPowerLaw1D(delta=0)
assert str(err.value) == "delta parameter must be >= 0.001"
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
def test_SmoothlyBrokenPowerLaw1D_fit_deriv():
x_lim = [0.01, 100]
x = np.logspace(x_lim[0], x_lim[1], 100)
parameters = {'parameters': [1, 10, -2, 2, 0.5],
'constraints': {'fixed': {'x_break': True, 'delta': True}}}
model_with_deriv = create_model(SmoothlyBrokenPowerLaw1D, parameters,
use_constraints=False)
model_no_deriv = create_model(SmoothlyBrokenPowerLaw1D, parameters,
use_constraints=False)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
rsn_rand_1234567890 = np.array([
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890])
n = 0.1 * parameters['parameters'][0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters, atol=0.5)
class _ExtendedModelMeta(_ModelMeta):
@classmethod
def __prepare__(mcls, name, bases, **kwds):
# this shows the parent class machinery still applies
namespace = super().__prepare__(name, bases, **kwds)
# the custom bit
namespace.update(kwds)
return namespace
def test_metaclass_kwargs():
"""Test can pass kwargs to Models"""
class ClassModel(FittableModel, flag="flag"):
def evaluate(self):
pass
# Nothing further to test, just making the class is good enough.
def test_submetaclass_kwargs():
"""Test can pass kwargs to Model subclasses."""
class ClassModel(FittableModel, metaclass=_ExtendedModelMeta, flag="flag"):
def evaluate(self):
pass
assert ClassModel.flag == "flag"
class ModelDefault(Model):
slope = Parameter()
intercept = Parameter()
_separable = False
@staticmethod
def evaluate(x, slope, intercept):
return slope * x + intercept
class ModelCustom(ModelDefault):
def _calculate_separability_matrix(self):
return np.array([[0, ]])
def test_custom_separability_matrix():
original = separability_matrix(ModelDefault(slope=1, intercept=2))
assert original.all()
custom = separability_matrix(ModelCustom(slope=1, intercept=2))
assert not custom.any()
|
6402396d8a145e5c431af6fef398b1ec6b1b2dc55e77734d802d56862ddcc41e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests model set evaluation and fitting for some common use cases.
"""
import numpy as np
# pylint: disable=invalid-name
import pytest
from numpy.testing import assert_allclose
from astropy.modeling.core import Model
from astropy.modeling.fitting import LinearLSQFitter
from astropy.modeling.models import (
Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre1D, Legendre2D, Linear1D, Planar2D,
Polynomial1D, Polynomial2D)
from astropy.modeling.parameters import Parameter
from astropy.utils import NumpyRNGContext
x = np.arange(4)
xx = np.array([x, x + 10])
xxx = np.arange(24).reshape((3, 4, 2))
_RANDOM_SEED = 0x1337
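# Background for these tests: a model set evaluates n_models models in a
# single call, and ``model_set_axis`` selects which axis of the parameter
# and input arrays enumerates the models.  A minimal sketch (illustrative
# only, not one of the cases below):
#
#   p = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2)  # model_set_axis=0
#   p(np.array([[0., 1.], [0., 1.]]))  # row i is evaluated by model i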
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
    # standard_broadcasting = False
n_inputs = 1
outputs = ('x',)
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(x, coeff, e):
return x*coeff + e
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_1(model_class):
"""
Test that a model initialized with model_set_axis=1
can be evaluated with model_set_axis=False.
"""
n_models = 2
model_axis = 1
c0 = [[2, 3]]
c1 = [[1, 2]]
t1 = model_class(1, c0=2, c1=1)
t2 = model_class(1, c0=3, c1=2)
p1 = model_class(1, c0=c0, c1=c1, n_models=n_models, model_set_axis=model_axis)
with pytest.raises(ValueError):
p1(x)
y = p1(x, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0], t1(x))
assert_allclose(y[:, 1], t2(x))
y = p1(xx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :], t1(xx))
assert_allclose(y[:, 1, :], t2(xx))
y = p1(xxx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :, :], t1(xxx))
assert_allclose(y[:, 1, :, :], t2(xxx))
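# Note: model_set_axis=False broadcasts a single input against every member of
# the set, so the set axis (here axis 1) appears in the *output* shape even
# though the input itself carries no such axis.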
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_2(model_class):
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
p1 = model_class(1, c0=[[[1, 2, 3]]], c1=[[[10, 20, 30]]],
n_models=3, model_set_axis=2)
t1 = model_class(1, c0=1, c1=10)
t2 = model_class(1, c0=2, c1=20)
t3 = model_class(1, c0=3, c1=30)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape == (1, 4, 3)
assert_allclose(y[:, :, 0].flatten(), t1(x))
assert_allclose(y[:, :, 1].flatten(), t2(x))
assert_allclose(y[:, :, 2].flatten(), t3(x))
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_0(model_class):
"""
Test that a model initialized with model_set_axis=0
can be evaluated with model_set_axis=False.
"""
p1 = model_class(1, n_models=2, model_set_axis=0)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = model_class(1, c0=2, c1=1)
t2 = model_class(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
y = p1(xx)
assert len(y) == 2
assert_allclose(y[0], t1(xx[0]))
assert_allclose(y[1], t2(xx[1]))
y = p1(x, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(x))
assert_allclose(y[1], t2(x))
y = p1(xx, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(xx))
assert_allclose(y[1], t2(xx))
y = p1(xxx, model_set_axis=False)
assert_allclose(y[0], t1(xxx))
assert_allclose(y[1], t2(xxx))
assert len(y) == 2
@pytest.mark.parametrize('model_class', [Chebyshev2D, Legendre2D, Hermite2D])
def test_model2d_axis_2(model_class):
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
p2 = model_class(1, 1, c0_0=[[[0, 1, 2]]], c0_1=[[[3, 4, 5]]],
c1_0=[[[5, 6, 7]]], c1_1=[[[1, 1, 1]]], n_models=3, model_set_axis=2)
t1 = model_class(1, 1, c0_0=0, c0_1=3, c1_0=5, c1_1=1)
t2 = model_class(1, 1, c0_0=1, c0_1=4, c1_0=6, c1_1=1)
t3 = model_class(1, 1, c0_0=2, c0_1=5, c1_0=7, c1_1=1)
assert p2.c0_0.shape == (1, 1, 3)
y = p2(x, x, model_set_axis=False)
assert y.shape == (1, 4, 3)
# These are columns along the 2nd axis.
assert_allclose(y[:, :, 0].flatten(), t1(x, x))
assert_allclose(y[:, :, 1].flatten(), t2(x, x))
assert_allclose(y[:, :, 2].flatten(), t3(x, x))
def test_negative_axis():
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
t1 = Polynomial1D(1, c0=1, c1=3)
t2 = Polynomial1D(1, c0=2, c1=4)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
xxt = xx.T
y = p1(xxt)
assert_allclose(y[:, 0], t1(xxt[:, 0]))
assert_allclose(y[:, 1], t2(xxt[:, 1]))
def test_shapes():
p2 = Polynomial1D(1, n_models=3, model_set_axis=2)
assert p2.c0.shape == (1, 1, 3)
assert p2.c1.shape == (1, 1, 3)
p1 = Polynomial1D(1, n_models=2, model_set_axis=1)
assert p1.c0.shape == (1, 2)
assert p1.c1.shape == (1, 2)
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
assert p1.c0.shape == (2,)
assert p1.c1.shape == (2,)
e1 = [1, 2]
e2 = [3, 4]
a1 = np.array([[10, 20], [30, 40]])
a2 = np.array([[50, 60], [70, 80]])
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=-1)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
t = TParModel([[a1, a2]], [[e1, e2]], n_models=2, model_set_axis=1)
assert t.coeff.shape == (1, 2, 2, 2)
assert t.e.shape == (1, 2, 2)
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
t = TParModel([a1, a2], e=[1, 2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2,)
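# Note: the common thread above is that each parameter array must carry an axis
# of length n_models at position ``model_set_axis``; the remaining axes form
# the per-model shape of that parameter.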
def test_eval():
""" Tests evaluation of Linear1D and Planar2D with different model_set_axis."""
model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2)
p = Polynomial1D(1, c0=[3, 4], c1=[1, 2], n_models=2)
assert_allclose(model(xx), p(xx))
assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False))
with pytest.raises(ValueError):
model(x)
model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1)
p = Polynomial1D(1, c0=[[3, 4]], c1=[[1, 2]], n_models=2, model_set_axis=1)
assert_allclose(model(xx.T), p(xx.T))
assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False))
with pytest.raises(ValueError):
model(xx)
model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx, xx)
assert y.shape == (2, 4)
with pytest.raises(ValueError):
model(x)
# Test fitting
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_linearlsqfitter(model_class):
"""
Issue #7159
"""
p = model_class(1, n_models=2, model_set_axis=1)
# Generate data for fitting 2 models and re-stack them along the last axis:
y = np.array([2*x+1, x+4])
y = np.rollaxis(y, 0, -1).T
f = LinearLSQFitter()
# This seems to fit the model_set correctly:
fit = f(p, x, y)
model_y = fit(x, model_set_axis=False)
m1 = model_class(1, c0=fit.c0[0][0], c1=fit.c1[0][0], domain=fit.domain)
m2 = model_class(1, c0=fit.c0[0][1], c1=fit.c1[0][1], domain=fit.domain)
assert_allclose(model_y[:, 0], m1(x))
assert_allclose(model_y[:, 1], m2(x))
p = model_class(1, n_models=2, model_set_axis=0)
fit = f(p, x, y.T)
def test_model_set_axis_outputs():
fitter = LinearLSQFitter()
model_set = Polynomial2D(1, n_models=2, model_set_axis=2)
y2, x2 = np.mgrid[: 5, : 5]
    # z = np.moveaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 2)
z = np.rollaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 3)
model = fitter(model_set, x2, y2, z)
res = model(x2, y2, model_set_axis=False)
assert z.shape == res.shape
# Test initializing with integer model_set_axis
# and evaluating with a different model_set_axis
model_set = Polynomial1D(1, c0=[1, 2], c1=[2, 3],
n_models=2, model_set_axis=0)
y0 = model_set(xx)
y1 = model_set(xx.T, model_set_axis=1)
assert_allclose(y0[0], y1[:, 0])
assert_allclose(y0[1], y1[:, 1])
model_set = Polynomial1D(1, c0=[[1, 2]], c1=[[2, 3]],
n_models=2, model_set_axis=1)
y0 = model_set(xx.T)
y1 = model_set(xx, model_set_axis=0)
assert_allclose(y0[:, 0], y1[0])
assert_allclose(y0[:, 1], y1[1])
with pytest.raises(ValueError):
model_set(x)
def test_fitting_shapes():
""" Test fitting model sets of Linear1D and Planar2D."""
fitter = LinearLSQFitter()
model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx)
fitter(model, x, y)
model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1)
fitter(model, x, y.T)
model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx, xx)
fitter(model, x, x, y)
def test_compound_model_sets():
with pytest.raises(ValueError):
(Polynomial1D(1, n_models=2, model_set_axis=1) |
Polynomial1D(1, n_models=2, model_set_axis=0))
def test_linear_fit_model_set_errors():
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y = init_model(x, model_set_axis=False)
fitter = LinearLSQFitter()
with pytest.raises(ValueError):
fitter(init_model, x[:5], y)
with pytest.raises(ValueError):
fitter(init_model, x, y[:, :5])
def test_linear_fit_model_set_common_weight():
"""Tests fitting multiple models simultaneously."""
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
weights = np.ones(10)
weights[[0, -1]] = 0
fitted_model = fitter(init_model, x, y, weights=weights)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
# Check that using null weights raises an error
# ValueError: On entry to DLASCL parameter number 4 had an illegal value
with pytest.raises(ValueError,
match='Found NaNs in the coefficient matrix'):
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in.*divide'):
fitted_model = fitter(init_model, x, y, weights=np.zeros(10))
def test_linear_fit_model_set_weights():
"""Tests fitting multiple models simultaneously."""
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
weights = np.ones_like(y)
# Put a null weight for the min and max values
weights[[0, 1], weights.argmin(axis=1)] = 0
weights[[0, 1], weights.argmax(axis=1)] = 0
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, weights=weights)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
# Check that using null weights raises an error
weights[0] = 0
with pytest.raises(ValueError,
match='Found NaNs in the coefficient matrix'):
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in.*divide'):
fitted_model = fitter(init_model, x, y, weights=weights)
# Now we mask the values where weight is 0
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in.*divide'):
fitted_model = fitter(init_model, x,
np.ma.array(y, mask=np.isclose(weights, 0)),
weights=weights)
# Parameters for the first model are all NaNs
assert np.all(np.isnan(fitted_model.param_sets[:, 0]))
assert np.all(np.isnan(fitted_model(x, model_set_axis=False)[0]))
# Second model is fitted correctly
assert_allclose(fitted_model(x, model_set_axis=False)[1], y_expected[1],
rtol=1e-1)
def test_linear_fit_2d_model_set_errors():
init_model = Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z = init_model(x, y, model_set_axis=False)
fitter = LinearLSQFitter()
with pytest.raises(ValueError):
fitter(init_model, x[:5], y, z)
with pytest.raises(ValueError):
fitter(init_model, x, y, z[:, :5])
def test_linear_fit_2d_model_set_common_weight():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=np.ones((5, 5)))
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_flat_2d_model_set_common_weight():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
x, y = x.flatten(), y.flatten()
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
weights = np.ones(25)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_2d_model_set_weights():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
weights = [np.ones((5, 5)), np.ones((5, 5))]
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_flat_2d_model_set_weights():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
x, y = x.flatten(), y.flatten()
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
weights = np.ones((2, 25))
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
class Test1ModelSet:
"""
Check that fitting a single model works with a length-1 model set axis.
    It's not clear that this was the originally intended usage, but it can be
    convenient, e.g. when fitting a range of image rows that may be a single
    row, and some existing scripts might rely on it working.
Currently this does not work with FittingWithOutlierRemoval.
"""
def setup_class(self):
self.x1 = np.arange(0, 10)
self.y1 = np.array([0.5 + 2.5*self.x1])
self.w1 = np.ones((10,))
self.y1[0, 8] = 100.
self.w1[8] = 0.
self.y2, self.x2 = np.mgrid[0:10, 0:10]
self.z2 = np.array([1 - 0.1*self.x2 + 0.2*self.y2])
self.w2 = np.ones((10, 10))
self.z2[0, 1, 2] = 100.
self.w2[1, 2] = 0.
def test_linear_1d_common_weights(self):
model = Polynomial1D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1, weights=self.w1)
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_1d_separate_weights(self):
model = Polynomial1D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1,
weights=self.w1[np.newaxis, ...])
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_1d_separate_weights_axis_1(self):
model = Polynomial1D(1, model_set_axis=1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1.T,
weights=self.w1[..., np.newaxis])
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_2d_common_weights(self):
model = Polynomial2D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, self.z2, weights=self.w2)
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
def test_linear_2d_separate_weights(self):
model = Polynomial2D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, self.z2,
weights=self.w2[np.newaxis, ...])
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
def test_linear_2d_separate_weights_axis_2(self):
model = Polynomial2D(1, model_set_axis=2)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, np.rollaxis(self.z2, 0, 3),
weights=self.w2[..., np.newaxis])
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
|
3f97222663d33ab2844cfe62efc1ceefc87955bff35db2bb023037d3d3217539 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to evaluating models with quantity parameters
"""
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.modeling.core import Model
from astropy.modeling.models import Gaussian1D, Pix2Sky_TAN, Scale, Shift
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
# We start off by taking some simple cases where the units are defined by
# whatever the model is initialized with, and we check that the model evaluation
# returns quantities.
def test_evaluate_with_quantities():
"""
Test evaluation of a single model with Quantity parameters that do
not explicitly require units.
"""
# We create two models here - one with quantities, and one without. The one
# without is used to create the reference values for comparison.
g = Gaussian1D(1, 1, 0.1)
gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# We first check that calling the Gaussian with quantities returns the
# expected result
assert_quantity_allclose(gq(1 * u.m), g(1) * u.J)
# Units have to be specified for the Gaussian with quantities - if not, an
# error is raised
with pytest.raises(UnitsError) as exc:
gq(1)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', (dimensionless), could not be "
"converted to required input units of m (length)")
# However, zero is a special case
assert_quantity_allclose(gq(0), g(0) * u.J)
# We can also evaluate models with equivalent units
assert_allclose(gq(0.0005 * u.km).value, g(0.5))
# But not with incompatible units
with pytest.raises(UnitsError) as exc:
gq(3 * u.s)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', s (time), could not be "
"converted to required input units of m (length)")
# We also can't evaluate the model without quantities with a quantity
with pytest.raises(UnitsError) as exc:
g(3 * u.m)
# TODO: determine what error message should be here
# assert exc.value.args[0] == ("Units of input 'x', m (length), could not be "
# "converted to required dimensionless input")
def test_evaluate_with_quantities_and_equivalencies():
"""
We now make sure that equivalencies are correctly taken into account
"""
g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm)
# We aren't setting the equivalencies, so this won't work
with pytest.raises(UnitsError) as exc:
g(30 * u.PHz)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', PHz (frequency), could "
"not be converted to required input units of "
"nm (length)")
# But it should now work if we pass equivalencies when evaluating
assert_quantity_allclose(g(30 * u.PHz, equivalencies={'x': u.spectral()}),
g(9.993081933333332 * u.nm))
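# Note: the reference value above is just the spectral equivalence
# lambda = c / nu = (2.998e8 m/s) / (30e15 Hz) ~= 9.993 nm, i.e.
# (30 * u.PHz).to(u.nm, equivalencies=u.spectral()).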
class MyTestModel(Model):
n_inputs = 2
n_outputs = 1
def evaluate(self, a, b):
print('a', a)
print('b', b)
return a * b
class TestInputUnits():
def setup_method(self, method):
self.model = MyTestModel()
def test_evaluate(self):
# We should be able to evaluate with anything
assert_quantity_allclose(self.model(3, 5), 15)
assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m)
assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg)
def test_input_units(self):
self.model._input_units = {'x': u.deg}
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'x', s (time), could not be "
"converted to required input units of deg (angle)")
with pytest.raises(UnitsError) as exc:
self.model(3, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'x', (dimensionless), could "
"not be converted to required input units of deg (angle)")
def test_input_units_allow_dimensionless(self):
self.model._input_units = {'x': u.deg}
self.model._input_units_allow_dimensionless = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'x', s (time), could not be "
"converted to required input units of deg (angle)")
assert_quantity_allclose(self.model(3, 3), 9)
def test_input_units_strict(self):
self.model._input_units = {'x': u.deg}
self.model._input_units_strict = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
result = self.model(np.pi * u.rad, 2)
assert_quantity_allclose(result, 360 * u.deg)
assert result.unit is u.deg
def test_input_units_equivalencies(self):
self.model._input_units = {'x': u.micron}
with pytest.raises(UnitsError) as exc:
self.model(3 * u.PHz, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'x', PHz (frequency), could "
"not be converted to required input units of "
"micron (length)")
self.model.input_units_equivalencies = {'x': u.spectral()}
assert_quantity_allclose(self.model(3 * u.PHz, 3),
3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()))
def test_return_units(self):
self.model._input_units = {'z': u.deg}
self.model._return_units = {'z': u.rad}
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_return_units_scalar(self):
        # Check that return_units also works when given a single unit, since
        # there is only one output so it is unambiguous.
self.model._input_units = {'x': u.deg}
self.model._return_units = u.rad
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
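# Summary of the private switches exercised above, as these tests use them:
#   _input_units: the units each named input must be convertible to;
#   _input_units_strict: convert inputs to exactly those units before evaluate;
#   _input_units_allow_dimensionless: let bare numbers pass through unchanged;
#   _return_units: units attached to (or converted on) each output.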
def test_and_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.arcsecond, 20 * u.arcsecond)
assert_quantity_allclose(out[0], 10 * u.deg + 10 * u.arcsec)
assert_quantity_allclose(out[1], 10 * u.deg + 20 * u.arcsec)
def test_plus_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 + s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 20 * u.arcsec)
def test_compound_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 10 * u.arcsec)
def test_compound_input_units_fail():
"""
Test incompatible units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_incompatible_units_fail():
"""
Test incompatible model units in chain.
"""
s1 = Shift(10 * u.pix)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_pipe_equiv_call():
"""
Check that equivalencies work when passed to evaluate, for a chained model
(which has one input).
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.pix, equivalencies={'x': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out, 25 * u.deg)
def test_compound_and_equiv_call():
"""
Check that equivalencies work when passed to evaluate, for a composite model
with two inputs.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.pix, 10 * u.pix, equivalencies={'x0': u.pixel_scale(0.5 * u.deg / u.pix),
'x1': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out[0], 15 * u.deg)
assert_quantity_allclose(out[1], 15 * u.deg)
def test_compound_input_units_equivalencies():
"""
Test setting input_units_equivalencies on one of the models.
"""
s1 = Shift(10 * u.deg)
s1.input_units_equivalencies = {'x': u.pixel_scale(0.5 * u.deg / u.pix)}
s2 = Shift(10 * u.deg)
sp = Shift(10 * u.pix)
cs = s1 | s2
assert cs.input_units_equivalencies == {'x': u.pixel_scale(0.5 * u.deg / u.pix)}
out = cs(10 * u.pix)
assert_quantity_allclose(out, 25 * u.deg)
cs = sp | s1
assert cs.input_units_equivalencies is None
out = cs(10 * u.pix)
assert_quantity_allclose(out, 20 * u.deg)
cs = s1 & s2
assert cs.input_units_equivalencies == {'x0': u.pixel_scale(0.5 * u.deg / u.pix)}
cs = cs.rename('TestModel')
out = cs(20 * u.pix, 10 * u.deg)
assert_quantity_allclose(out, 20 * u.deg)
with pytest.raises(UnitsError) as exc:
out = cs(20 * u.pix, 10 * u.pix)
assert exc.value.args[0] == ("Shift: Units of input 'x', pix (unknown), could not be converted "
"to required input units of deg (angle)")
def test_compound_input_units_strict():
"""
Test setting input_units_strict on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s2 = Scale(2)
cs = s1 | s2
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s2 | s1
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s1 & s2
out = cs(10 * u.arcsec, 10 * u.arcsec)
assert_quantity_allclose(out, 20 * u.arcsec)
assert out[0].unit is u.deg
assert out[1].unit is u.arcsec
def test_compound_input_units_allow_dimensionless():
"""
Test setting input_units_allow_dimensionless on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s1._input_units_allow_dimensionless = True
s2 = Scale(2)
cs = s1 | s2
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', m (length), "
"could not be converted to required input units of deg (angle)")
s1._input_units_allow_dimensionless = False
cs = s1 | s2
cs = cs.rename('TestModel')
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', (dimensionless), "
"could not be converted to required input units of deg (angle)")
s1._input_units_allow_dimensionless = True
cs = s2 | s1
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', m (length), "
"could not be converted to required input units of deg (angle)")
s1._input_units_allow_dimensionless = False
cs = s2 | s1
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', (dimensionless), "
"could not be converted to required input units of deg (angle)")
s1._input_units_allow_dimensionless = True
s1 = ScaleDegrees(2)
s1._input_units_allow_dimensionless = True
s2 = ScaleDegrees(2)
s2._input_units_allow_dimensionless = False
cs = s1 & s2
cs = cs.rename('TestModel')
out = cs(10, 10 * u.arcsec)
assert_quantity_allclose(out[0], 20 * u.one)
assert_quantity_allclose(out[1], 20 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10, 10)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', (dimensionless), "
"could not be converted to required input units of deg (angle)")
def test_compound_return_units():
"""
Test that return_units on the first model in the chain is respected for the
input to the second.
"""
class PassModel(Model):
n_inputs = 2
n_outputs = 2
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def input_units(self):
""" Input units. """
return {'x0': u.deg, 'x1': u.deg}
@property
def return_units(self):
""" Output units. """
return {'x0': u.deg, 'x1': u.deg}
def evaluate(self, x, y):
return x.value, y.value
cs = Pix2Sky_TAN() | PassModel()
assert_quantity_allclose(cs(0*u.deg, 0*u.deg), (0, 90)*u.deg)
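# Note: Pix2Sky_TAN maps the tangent point (0, 0) to (lon, lat) = (0, 90) deg,
# and PassModel's return_units re-attach degrees to the values its evaluate
# stripped, which is what the final comparison checks.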
|
701df49e00095147dea3d5fae46618f195c402952d36696e5182ef3cafd4064b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to fitting models with quantity parameters
"""
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
# Fitting should be as intuitive as possible to the user. Essentially, models
# and fitting should work without units, but if one has units, the other should
# have units too, and the resulting fitted parameters will also have units.
fitters = [fitting.LevMarLSQFitter, fitting.TRFLSQFitter, fitting.LMLSQFitter,
fitting.DogBoxLSQFitter]
def _fake_gaussian_data():
# Generate fake data
with NumpyRNGContext(12345):
x = np.linspace(-5., 5., 2000)
y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
y += np.random.normal(0., 0.2, x.shape)
# Attach units to data
x = x * u.m
y = y * u.Jy
return x, y
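# A minimal sketch of the intended user experience (illustrative, mirroring the
# tests below): fitting quantity-valued data yields parameters with units, e.g.
#
#     x, y = _fake_gaussian_data()                    # x in m, y in Jy
#     g = fitting.TRFLSQFitter()(models.Gaussian1D(), x, y)
#     g.mean.quantity                                 # ~1.3 m, a Quantity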
compound_models_no_units = [
models.Linear1D() + models.Gaussian1D() + models.Gaussian1D(),
models.Linear1D() + models.Gaussian1D() | models.Scale(),
models.Linear1D() + models.Gaussian1D() | models.Shift(),
]
class CustomInputNamesModel(Fittable1DModel):
n_inputs = 1
n_outputs = 1
a = Parameter(default=1.0)
b = Parameter(default=1.0)
def __init__(self, a=a, b=b):
super().__init__(a=a, b=b)
self.inputs = ('inn',)
self.outputs = ('out',)
@staticmethod
def evaluate(inn, a, b):
return a * inn + b
@property
def input_units(self):
if self.a.unit is None and self.b.unit is None:
return None
else:
return {'inn': self.b.unit / self.a.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
'a': outputs_unit['out'] / inputs_unit['inn'],
'b': outputs_unit['out']
}
def models_with_custom_names():
line = models.Linear1D(1 * u.m / u.s, 2 * u.m)
line.inputs = ('inn',)
line.outputs = ('out',)
custom_names_model = CustomInputNamesModel(1 * u.m / u.s, 2 * u.m)
return [line, custom_names_model]
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fitting_simple(fitter):
fitter = fitter()
x, y = _fake_gaussian_data()
# Fit the data using a Gaussian with units
g_init = models.Gaussian1D()
g = fitter(g_init, x, y)
# TODO: update actual numerical results once implemented, but these should
# be close to the values below.
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fitting_with_initial_values(fitter):
fitter = fitter()
x, y = _fake_gaussian_data()
# Fit the data using a Gaussian with units
g_init = models.Gaussian1D(amplitude=1. * u.mJy,
mean=3 * u.cm,
stddev=2 * u.mm)
g = fitter(g_init, x, y)
# TODO: update actual numerical results once implemented, but these should
# be close to the values below.
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fitting_missing_data_units(fitter):
"""
Raise an error if the model has units but the data doesn't
"""
fitter = fitter()
class UnorderedGaussian1D(models.Gaussian1D):
# Parameters are ordered differently here from Gaussian1D
# to ensure the order does not break functionality.
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit['y'],
'mean': inputs_unit['x'],
'stddev': inputs_unit['x']}
g_init = UnorderedGaussian1D(amplitude=1. * u.mJy,
mean=3 * u.cm,
stddev=2 * u.mm)
    # We define the flux unit so that conversion fails at the wavelength unit.
    # This is because parameter unit conversion appears to follow the order
    # defined in the _parameter_units_for_data_units method.
with pytest.raises(UnitsError) as exc:
fitter(g_init, [1, 2, 3],
[4, 5, 6] * (u.erg / (u.s * u.cm * u.cm * u.Hz)))
assert exc.value.args[0] == ("'cm' (length) and '' (dimensionless) are "
"not convertible")
with pytest.raises(UnitsError) as exc:
fitter(g_init, [1, 2, 3] * u.m, [4, 5, 6])
assert exc.value.args[0] == ("'mJy' (spectral flux density) and '' "
"(dimensionless) are not convertible")
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fitting_missing_model_units(fitter):
"""
Proceed if the data has units but the model doesn't
"""
fitter = fitter()
x, y = _fake_gaussian_data()
g_init = models.Gaussian1D(amplitude=1., mean=3, stddev=2)
g = fitter(g_init, x, y)
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
g_init = models.Gaussian1D(amplitude=1., mean=3 * u.m, stddev=2 * u.m)
g = fitter(g_init, x, y)
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fitting_incompatible_units(fitter):
"""
Raise an error if the data and model have incompatible units
"""
fitter = fitter()
g_init = models.Gaussian1D(amplitude=1. * u.Jy,
mean=3 * u.m,
stddev=2 * u.cm)
with pytest.raises(UnitsError) as exc:
fitter(g_init, [1, 2, 3] * u.Hz, [4, 5, 6] * u.Jy)
assert exc.value.args[0] == ("'Hz' (frequency) and 'm' (length) are not convertible")
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.filterwarnings(r'ignore:divide by zero encountered.*')
@pytest.mark.parametrize('model', compound_models_no_units)
@pytest.mark.parametrize('fitter', fitters)
def test_compound_without_units(model, fitter):
fitter = fitter()
x = np.linspace(-5, 5, 10) * u.Angstrom
with NumpyRNGContext(12345):
y = np.random.sample(10)
res_fit = fitter(model, x, y * u.Hz)
for param_name in res_fit.param_names:
print(getattr(res_fit, param_name))
assert all([res_fit[i]._has_units for i in range(3)])
z = res_fit(x)
assert isinstance(z, u.Quantity)
res_fit = fitter(model, np.arange(10) * u.Unit('Angstrom'), y)
assert all([res_fit[i]._has_units for i in range(3)])
z = res_fit(x)
assert isinstance(z, np.ndarray)
# FIXME: See https://github.com/astropy/astropy/issues/10675
# @pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.skip(reason='Flaky and ill-conditioned')
@pytest.mark.parametrize('fitter', fitters)
def test_compound_fitting_with_units(fitter):
fitter = fitter()
x = np.linspace(-5, 5, 15) * u.Angstrom
y = np.linspace(-5, 5, 15) * u.Angstrom
m = models.Gaussian2D(10*u.Hz,
3*u.Angstrom, 4*u.Angstrom,
1*u.Angstrom, 2*u.Angstrom)
p = models.Planar2D(3*u.Hz/u.Angstrom, 4*u.Hz/u.Angstrom, 1*u.Hz)
model = m + p
z = model(x, y)
res = fitter(model, x, y, z)
assert isinstance(res(x, y), np.ndarray)
assert all([res[i]._has_units for i in range(2)])
model = models.Gaussian2D() + models.Planar2D()
res = fitter(model, x, y, z)
assert isinstance(res(x, y), np.ndarray)
assert all([res[i]._has_units for i in range(2)])
# A case of a mixture of models with and without units
model = models.BlackBody(temperature=3000 * u.K) * models.Const1D(amplitude=1.0)
x = np.linspace(1, 3, 10000) * u.micron
with NumpyRNGContext(12345):
n = np.random.normal(3)
y = model(x)
res = fitter(model, x, y * (1 + n))
    # The large rtol here is due to different results on linux and macosx,
    # likely because the model is ill-conditioned.
np.testing.assert_allclose(res.parameters, [3000, 2.1433621e+00, 2.647347e+00], rtol=0.4)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters*')
@pytest.mark.parametrize('model', models_with_custom_names())
@pytest.mark.parametrize('fitter', fitters)
def test_fitting_custom_names(model, fitter):
""" Tests fitting of models with custom inputs and outsputs names."""
fitter = fitter()
x = np.linspace(0, 10, 100) * u.s
y = model(x)
new_model = fitter(model, x, y)
for param_name in model.param_names:
assert_quantity_allclose(getattr(new_model, param_name).quantity,
getattr(model, param_name).quantity)
|
ac313a8a863288f5b83b8663fa3f0119e84e8d43e8cd55654478e398bc5f6c46 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests fitting and model evaluation with various inputs
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel, FittableModel, Model
from astropy.modeling.parameters import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
model1d_params = [
(models.Polynomial1D, [2]),
(models.Legendre1D, [2]),
(models.Chebyshev1D, [2]),
(models.Shift, [2]),
(models.Scale, [2])
]
model2d_params = [
(models.Polynomial2D, [2]),
(models.Legendre2D, [1, 2]),
(models.Chebyshev2D, [1, 2])
]
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
fitting.LMLSQFitter,
fitting.DogBoxLSQFitter
]
class TestInputType:
"""
This class tests that models accept numbers, lists and arrays.
Add new models to one of the lists above to test for this.
"""
def setup_class(self):
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.parametrize(('model', 'params'), model1d_params)
def test_input1D(self, model, params):
m = model(*params)
m(self.x)
m(self.x1)
m(self.x2)
@pytest.mark.parametrize(('model', 'params'), model2d_params)
def test_input2D(self, model, params):
m = model(*params)
m(self.x, self.y)
m(self.x1, self.y1)
m(self.x2, self.y2)
class TestFitting:
"""Test various input options to fitting routines."""
def setup_class(self):
self.x1 = np.arange(10)
self.y, self.x = np.mgrid[:10, :10]
def test_linear_fitter_1set(self):
"""1 set 1D x, 1pset"""
expected = np.array([0, 1, 1, 1])
p1 = models.Polynomial1D(3)
p1.parameters = [0, 1, 1, 1]
y1 = p1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.parameters, expected, atol=10 ** (-7))
def test_linear_fitter_Nset(self):
"""1 set 1D x, 2 sets 1D y, 2 param_sets"""
expected = np.array([[0, 0], [1, 1], [2, 2], [3, 3]])
p1 = models.Polynomial1D(3, n_models=2)
p1.parameters = [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0]
params = {}
for i in range(4):
params[p1.param_names[i]] = [i, i]
p1 = models.Polynomial1D(3, model_set_axis=0, **params)
y1 = p1(self.x1, model_set_axis=False)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-7))
def test_linear_fitter_1dcheb(self):
"""1 pset, 1 set 1D x, 1 set 1D y, Chebyshev 1D polynomial"""
expected = np.array(
[[2817.2499999999995,
4226.6249999999991,
1680.7500000000009,
273.37499999999926]]).T
ch1 = models.Chebyshev1D(3)
ch1.parameters = [0, 1, 2, 3]
y1 = ch1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(ch1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-2))
def test_linear_fitter_1dlegend(self):
"""
1 pset, 1 set 1D x, 1 set 1D y, Legendre 1D polynomial
"""
expected = np.array(
[[1925.5000000000011,
3444.7500000000005,
1883.2500000000014,
364.4999999999996]]).T
leg1 = models.Legendre1D(3)
leg1.parameters = [1, 2, 3, 4]
y1 = leg1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(leg1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-12))
def test_linear_fitter_1set2d(self):
p2 = models.Polynomial2D(2)
p2.parameters = [0, 1, 2, 3, 4, 5]
expected = [0, 1, 2, 3, 4, 5]
z = p2(self.x, self.y)
pfit = fitting.LinearLSQFitter()
model = pfit(p2, self.x, self.y, z)
assert_allclose(model.parameters, expected, atol=10 ** (-12))
assert_allclose(model(self.x, self.y), z, atol=10 ** (-12))
def test_wrong_numpset(self):
"""
        A ValueError is raised if a single data set (1D x, 1D y) is fit
        with a model that has multiple parameter sets.
"""
with pytest.raises(ValueError):
p1 = models.Polynomial1D(5)
y1 = p1(self.x1)
p1 = models.Polynomial1D(5, n_models=2)
pfit = fitting.LinearLSQFitter()
pfit(p1, self.x1, y1)
def test_wrong_pset(self):
"""A case of 1 set of x and multiple sets of y and parameters."""
expected = np.array([[1., 0],
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[1, 5]])
p1 = models.Polynomial1D(5, n_models=2)
params = {}
for i in range(6):
params[p1.param_names[i]] = [1, i]
p1 = models.Polynomial1D(5, model_set_axis=0, **params)
y1 = p1(self.x1, model_set_axis=False)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-7))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_nonlinear_lsqt_1set_1d(self, fitter):
"""1 set 1D x, 1 set 1D y, 1 pset NonLinearFitter"""
fitter = fitter()
g1 = models.Gaussian1D(10, mean=3, stddev=.2)
y1 = g1(self.x1)
model = fitter(g1, self.x1, y1)
assert_allclose(model.parameters, [10, 3, .2])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_nonlinear_lsqt_Nset_1d(self, fitter):
"""1 set 1D x, 1 set 1D y, 2 param_sets, NonLinearFitter"""
fitter = fitter()
with pytest.raises(ValueError):
g1 = models.Gaussian1D([10.2, 10], mean=[3, 3.2], stddev=[.23, .2],
n_models=2)
y1 = g1(self.x1, model_set_axis=False)
_ = fitter(g1, self.x1, y1)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_nonlinear_lsqt_1set_2d(self, fitter):
"""1 set 2d x, 1set 2D y, 1 pset, NonLinearFitter"""
fitter = fitter()
g2 = models.Gaussian2D(10, x_mean=3, y_mean=4, x_stddev=.3,
y_stddev=.2, theta=0)
z = g2(self.x, self.y)
model = fitter(g2, self.x, self.y, z)
assert_allclose(model.parameters, [10, 3, 4, .3, .2, 0])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_nonlinear_lsqt_Nset_2d(self, fitter):
"""1 set 2d x, 1set 2D y, 2 param_sets, NonLinearFitter"""
fitter = fitter()
with pytest.raises(ValueError):
g2 = models.Gaussian2D([10, 10], [3, 3], [4, 4], x_stddev=[.3, .3],
y_stddev=[.2, .2], theta=[0, 0], n_models=2)
z = g2(self.x.flatten(), self.y.flatten())
_ = fitter(g2, self.x, self.y, z)
class TestEvaluation:
"""
Test various input options to model evaluation
TestFitting actually covers evaluation of polynomials
"""
def setup_class(self):
self.x1 = np.arange(20)
self.y, self.x = np.mgrid[:10, :10]
def test_non_linear_NYset(self):
"""
This case covers:
        N param sets, 1 set 1D x --> N 1D y data
"""
g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2)
y1 = g1(self.x1, model_set_axis=False)
        # The two parameter sets are identical, so both outputs must match.
        assert_allclose(y1[0, :], y1[1, :])
def test_non_linear_NXYset(self):
"""
        This case covers: N param sets, N sets 1D x --> N sets 1D y data
"""
g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2)
xx = np.array([self.x1, self.x1])
y1 = g1(xx)
        # The set axis is 0, so identical models must give identical rows.
        assert_allclose(y1[0, :], y1[1, :], atol=10 ** (-12))
def test_p1_1set_1pset(self):
"""1 data set, 1 pset, Polynomial1D"""
p1 = models.Polynomial1D(4)
y1 = p1(self.x1)
assert y1.shape == (20,)
def test_p1_nset_npset(self):
"""N data sets, N param_sets, Polynomial1D"""
p1 = models.Polynomial1D(4, n_models=2)
y1 = p1(np.array([self.x1, self.x1]).T, model_set_axis=-1)
assert y1.shape == (20, 2)
        # The set axis is the last axis, so identical models must give
        # identical columns.
        assert_allclose(y1[:, 0], y1[:, 1], atol=10 ** (-12))
def test_p2_1set_1pset(self):
"""1 pset, 1 2D data set, Polynomial2D"""
p2 = models.Polynomial2D(5)
z = p2(self.x, self.y)
assert z.shape == (10, 10)
def test_p2_nset_npset(self):
"""N param_sets, N 2D data sets, Poly2d"""
p2 = models.Polynomial2D(5, n_models=2)
xx = np.array([self.x, self.x])
yy = np.array([self.y, self.y])
z = p2(xx, yy)
assert z.shape == (2, 10, 10)
def test_nset_domain(self):
"""
Test model set with negative model_set_axis.
In this case model_set_axis=-1 is identical to model_set_axis=1.
"""
xx = np.array([self.x1, self.x1]).T
xx[0, 0] = 100
xx[1, 0] = 100
xx[2, 0] = 99
p1 = models.Polynomial1D(5, c0=[1, 2], c1=[3, 4], n_models=2)
yy = p1(xx, model_set_axis=-1)
assert_allclose(xx.shape, yy.shape)
yy1 = p1(xx, model_set_axis=1)
assert_allclose(yy, yy1)
def test_evaluate_gauss2d(self):
cov = np.array([[1., 0.8], [0.8, 3]])
g = models.Gaussian2D(1., 5., 4., cov_matrix=cov)
y, x = np.mgrid[:10, :10]
g(x, y)
class TModel_1_1(Fittable1DModel):
p1 = Parameter()
p2 = Parameter()
@staticmethod
def evaluate(x, p1, p2):
return x + p1 + p2
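# Note: TModel_1_1 simply computes x + p1 + p2, so the expected values in the
# tests below can be read off directly, e.g. input 100 with p1=1, p2=10 -> 111.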
class TestSingleInputSingleOutputSingleModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=1.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_1(1, 10)
y = t(100)
assert isinstance(y, float)
assert np.ndim(y) == 0
assert y == 111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3)
assert np.all(y == [[11, 111, 211],
[311, 411, 511]])
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3, 2)
assert np.all(y == [[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]]])
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y = t(100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([[100], [200]])
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
with pytest.raises(ValueError):
# Doesn't broadcast
t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[1, 2], [3, 4]], [[10, 20], [30, 40]])
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
y2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == (2, 2, 2, 2)
assert np.all(y2 == [[[[111, 122], [133, 144]],
[[211, 222], [233, 244]]],
[[[311, 322], [333, 344]],
[[411, 422], [433, 444]]]])
with pytest.raises(ValueError):
# Doesn't broadcast
t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[1, 2, 3])
y1 = t([10, 20, 30])
assert np.shape(y1) == (2, 2, 3)
assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])
y2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == (3, 2, 2, 3)
assert_allclose(y2, [[[[11.01, 12.02, 13.03],
[11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09],
[11.10, 12.11, 13.12]]],
[[[21.01, 22.02, 23.03],
[21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09],
[21.10, 22.11, 23.12]]],
[[[31.01, 32.02, 33.03],
[31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09],
[31.10, 32.11, 33.12]]]])
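# Note: the single-model cases above follow plain NumPy broadcasting: an input
# of shape S and parameters of shape P produce an output of shape
# np.broadcast_shapes(S, P), e.g. input (2, 1) against parameters (2,) -> (2, 2).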
class TestSingleInputSingleOutputTwoModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=2.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
With n_models=2 all outputs should have a first dimension of size 2 (unless
defined with model_set_axis != 0).
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a 1-D array with
size equal to the number of models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
y = t(100)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_scalar_parameters_1d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
with pytest.raises(ValueError):
t(np.arange(5) * 100)
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([100, 200], model_set_axis=False)
        # In this case the values [100, 200] are evaluated by each model
        # rather than the first model with 100 and the second model with 200.
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 211], [122, 222]])
y3 = t([100, 200, 300], model_set_axis=False)
assert np.shape(y3) == (2, 3)
assert np.all(y3 == [[111, 211, 311], [122, 222, 322]])
def test_scalar_parameters_2d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
y1 = t(np.arange(6).reshape(2, 3) * 100)
assert np.shape(y1) == (2, 3)
assert np.all(y1 == [[11, 111, 211],
[322, 422, 522]])
y2 = t(np.arange(6).reshape(2, 3) * 100, model_set_axis=False)
assert np.shape(y2) == (2, 2, 3)
assert np.all(y2 == [[[11, 111, 211], [311, 411, 511]],
[[22, 122, 222], [322, 422, 522]]])
def test_scalar_parameters_3d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
data = np.arange(12).reshape(2, 3, 2) * 100
y1 = t(data)
assert np.shape(y1) == (2, 3, 2)
assert np.all(y1 == [[[11, 111], [211, 311], [411, 511]],
[[622, 722], [822, 922], [1022, 1122]]])
y2 = t(data, model_set_axis=False)
assert np.shape(y2) == (2, 2, 3, 2)
assert np.all(y2 == np.array([data + 11, data + 22]))
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]], n_models=2)
y = t(100)
assert np.shape(y) == (2, 3)
assert np.all(y == [[111, 122, 133], [144, 155, 166]])
def test_1d_array_parameters_1d_array_input(self):
"""
When the input is an array, if model_set_axis=False then it must
broadcast with the shapes of the parameters (excluding the
model_set_axis).
Otherwise all dimensions must be broadcastable.
"""
t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]], n_models=2)
with pytest.raises(ValueError):
y1 = t([100, 200, 300])
y1 = t([100, 200])
assert np.shape(y1) == (2, 3)
assert np.all(y1 == [[111, 122, 133], [244, 255, 266]])
with pytest.raises(ValueError):
# Doesn't broadcast with the shape of the parameters, (3,)
y2 = t([100, 200], model_set_axis=False)
y2 = t([100, 200, 300], model_set_axis=False)
assert np.shape(y2) == (2, 3)
assert np.all(y2 == [[111, 222, 333],
[144, 255, 366]])
def test_2d_array_parameters_2d_array_input(self):
t = TModel_1_1([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
n_models=2)
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2, 2)
assert np.all(y1 == [[[111, 222], [133, 244]],
[[355, 466], [377, 488]]])
with pytest.raises(ValueError):
y2 = t([[100, 200, 300], [400, 500, 600]])
y2 = t([[[100, 200], [300, 400]], [[500, 600], [700, 800]]])
assert np.shape(y2) == (2, 2, 2)
assert np.all(y2 == [[[111, 222], [333, 444]],
[[555, 666], [777, 888]]])
def test_mixed_array_parameters_1d_array_input(self):
t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
with pytest.raises(ValueError):
y = t([10, 20, 30])
y = t([10, 20, 30], model_set_axis=False)
assert np.shape(y) == (2, 2, 3)
assert_allclose(y, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[14.07, 25.08, 36.09], [14.10, 25.11, 36.12]]])
class TModel_1_2(FittableModel):
n_inputs = 1
n_outputs = 2
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
@staticmethod
def evaluate(x, p1, p2, p3):
return (x + p1 + p2, x + p1 + p2 + p3)
class TestSingleInputDoubleOutputSingleModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = 1 but n_output = 2 on a toy model with n_models=1.
As of writing there are not enough controls to adjust how outputs from such
a model should be formatted (currently the shapes of outputs are assumed to
be directly associated with the shapes of corresponding inputs when
n_inputs == n_outputs). For now, the approach taken for cases like this is
to assume all outputs should have the same format.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(100)
assert isinstance(y, float)
assert isinstance(z, float)
assert np.ndim(y) == np.ndim(z) == 0
assert y == 111
assert z == 1111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
assert np.all(z == (y + 1000))
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2, 3)
assert np.all(y == [[11, 111, 211],
[311, 411, 511]])
assert np.all(z == (y + 1000))
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2, 3, 2)
assert np.all(y == [[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]]])
assert np.all(z == (y + 1000))
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_2([1, 2], [10, 20], [1000, 2000])
y, z = t(100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2,)
assert np.all(y == [111, 122])
assert np.all(z == [1111, 2122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([1, 2], [10, 20], [1000, 2000])
y1, z1 = t([100, 200])
assert np.shape(y1) == np.shape(z1) == (2,)
assert np.all(y1 == [111, 222])
assert np.all(z1 == [1111, 2222])
y2, z2 = t([[100], [200]])
assert np.shape(y2) == np.shape(z2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
assert np.all(z2 == [[1111, 2122], [1211, 2222]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3, z3 = t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([[1, 2], [3, 4]], [[10, 20], [30, 40]],
[[1000, 2000], [3000, 4000]])
y1, z1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == np.shape(z1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
assert np.all(z1 == [[1111, 2222], [3333, 4444]])
y2, z2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == np.shape(z2) == (2, 2, 2, 2)
assert np.all(y2 == [[[[111, 122], [133, 144]],
[[211, 222], [233, 244]]],
[[[311, 322], [333, 344]],
[[411, 422], [433, 444]]]])
assert np.all(z2 == [[[[1111, 2122], [3133, 4144]],
[[1211, 2222], [3233, 4244]]],
[[[1311, 2322], [3333, 4344]],
[[1411, 2422], [3433, 4444]]]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3, z3 = t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[1, 2, 3], [100, 200, 300])
y1, z1 = t([10, 20, 30])
assert np.shape(y1) == np.shape(z1) == (2, 2, 3)
assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])
assert_allclose(z1, [[[111.01, 222.02, 333.03],
[111.04, 222.05, 333.06]],
[[111.07, 222.08, 333.09],
[111.10, 222.11, 333.12]]])
y2, z2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == np.shape(z2) == (3, 2, 2, 3)
assert_allclose(y2, [[[[11.01, 12.02, 13.03],
[11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09],
[11.10, 12.11, 13.12]]],
[[[21.01, 22.02, 23.03],
[21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09],
[21.10, 22.11, 23.12]]],
[[[31.01, 32.02, 33.03],
[31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09],
[31.10, 32.11, 33.12]]]])
assert_allclose(z2, [[[[111.01, 212.02, 313.03],
[111.04, 212.05, 313.06]],
[[111.07, 212.08, 313.09],
[111.10, 212.11, 313.12]]],
[[[121.01, 222.02, 323.03],
[121.04, 222.05, 323.06]],
[[121.07, 222.08, 323.09],
[121.10, 222.11, 323.12]]],
[[[131.01, 232.02, 333.03],
[131.04, 232.05, 333.06]],
[[131.07, 232.08, 333.09],
[131.10, 232.11, 333.12]]]])
# test broadcasting rules
broadcast_models = [
{
'model': models.Identity(2),
'inputs': [0, [1, 1]],
'outputs': [0, [1, 1]]
},
{
'model': models.Identity(2),
'inputs': [[1, 1], 0],
'outputs': [[1, 1], 0]
},
{
'model': models.Mapping((0, 1)),
'inputs': [0, [1, 1]],
'outputs': [0, [1, 1]]
},
{
'model': models.Mapping((1, 0)),
'inputs': [0, [1, 1]],
'outputs': [[1, 1], 0]
},
{
'model': models.Mapping((1, 0), n_inputs=3),
'inputs': [0, [1, 1], 2],
'outputs': [[1, 1], 0]
},
{
'model': models.Mapping((0, 1, 0)),
'inputs': [0, [1, 1]],
'outputs': [0, [1, 1], 0]
},
{
'model': models.Mapping((0, 1, 1)),
'inputs': [0, [1, 1]],
'outputs': [0, [1, 1], [1, 1]]
},
{
'model': models.Polynomial2D(1, c0_0=1),
'inputs': [0, [1, 1]],
'outputs': [1, 1]
},
{
'model': models.Polynomial2D(1, c0_0=1),
'inputs': [0, 1],
'outputs': 1
},
{
'model': models.Gaussian2D(1, 1, 2, 1, 1.2),
'inputs': [0, [1, 1]],
'outputs': [0.42860385, 0.42860385]
},
{
'model': models.Gaussian2D(1, 1, 2, 1, 1.2),
'inputs': [0, 1],
'outputs': 0.428603846153
},
{
'model': models.Polynomial2D(1, c0_0=1) & models.Polynomial2D(1, c0_0=2),
'inputs': [1, 1, 1, 1],
'outputs': (1, 2)
},
{
'model': models.Polynomial2D(1, c0_0=1) & models.Polynomial2D(1, c0_0=2),
'inputs': [1, 1, [1, 1], [1, 1]],
'outputs': (1, [2, 2])
},
{
'model': models.math.MultiplyUfunc(),
'inputs': [np.array([np.linspace(0, 1, 5)]).T, np.arange(2)],
'outputs': np.array([[0., 0.],
[0., 0.25],
[0., 0.5],
[0., 0.75],
[0., 1.]])
}
]
@pytest.mark.parametrize('model', broadcast_models)
def test_mixed_input(model):
result = model['model'](*model['inputs'])
if np.isscalar(result):
assert_allclose(result, model['outputs'])
else:
for i in range(len(result)):
assert_allclose(result[i], model['outputs'][i])
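# Illustrative sketch (added for clarity; not one of the parametrized cases
# above): Model.__call__ applies standard NumPy broadcasting to its inputs,
# so pairing a scalar with a length-2 list gives the per-output shapes listed
# in broadcast_models, e.g. for Identity(2):
def test_broadcast_rules_sketch():
    x, y = models.Identity(2)(0, [1, 1])
    assert_allclose(x, 0)
    assert_allclose(y, [1, 1])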
def test_more_outputs():
class M(FittableModel):
standard_broadcasting = False
n_inputs = 2
n_outputs = 3
a = Parameter()
def evaluate(self, x, y, a):
return a*x, a-x, a+y
def __call__(self, *args, **kwargs):
inputs, _ = super().prepare_inputs(*args, **kwargs)
outputs = self.evaluate(*inputs, *self.parameters)
output_shapes = [out.shape for out in outputs]
output_shapes = [() if shape == (1,) else shape for shape in output_shapes]
return self.prepare_outputs((tuple(output_shapes),), *outputs, **kwargs)
c = M(1)
result = c([1, 1], 1)
expected = [[1., 1.], [0., 0.], 2.]
for r, e in zip(result, expected):
assert_allclose(r, e)
c = M(1)
result = c(1, [1, 1])
expected = [1., 0., [2., 2.]]
for r, e in zip(result, expected):
assert_allclose(r, e)
class TInputFormatter(Model):
"""
A toy model to test input/output formatting.
"""
n_inputs = 2
n_outputs = 2
outputs = ('x', 'y')
@staticmethod
def evaluate(x, y):
return x, y
def test_format_input_scalars():
model = TInputFormatter()
result = model(1, 2)
assert result == (1, 2)
def test_format_input_arrays():
model = TInputFormatter()
result = model([1, 1], [2, 2])
assert_allclose(result, (np.array([1, 1]), np.array([2, 2])))
def test_format_input_arrays_transposed():
model = TInputFormatter()
input = np.array([[1, 1]]).T, np.array([[2, 2]]).T
result = model(*input)
assert_allclose(result, input)
@pytest.mark.parametrize('model',
[models.Gaussian2D(), models.Polynomial2D(1,),
models.Rotation2D(), models.Pix2Sky_TAN(),
models.Tabular2D(lookup_table=np.ones((4, 5)))])
@pytest.mark.skipif('not HAS_SCIPY')
def test_call_keyword_args_1(model):
"""
Test calling a model with positional, keyword and a mixture of both arguments.
"""
positional = model(1, 2)
assert_allclose(positional, model(x=1, y=2))
assert_allclose(positional, model(1, y=2))
model.inputs = ('r', 't')
assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
assert_allclose(positional, model(1, 2))
with pytest.raises(ValueError):
model(1, 2, 3)
with pytest.raises(ValueError):
model(1)
with pytest.raises(ValueError):
model(1, 2, t=12, r=3)
@pytest.mark.parametrize('model',
[models.Gaussian1D(), models.Polynomial1D(1,),
models.Tabular1D(lookup_table=np.ones((5,)))])
@pytest.mark.skipif('not HAS_SCIPY')
def test_call_keyword_args_2(model):
"""
Test calling a model with positional, keyword and a mixture of both arguments.
"""
positional = model(1)
assert_allclose(positional, model(x=1))
model.inputs = ('r',)
assert_allclose(positional, model(r=1))
with pytest.raises(ValueError):
model(1, 2, 3)
with pytest.raises(ValueError):
model()
with pytest.raises(ValueError):
model(1, 2, t=12, r=3)
@pytest.mark.parametrize('model',
[models.Gaussian2D() | models.Polynomial1D(1,),
models.Gaussian1D() & models.Polynomial1D(1,),
models.Gaussian2D() + models.Polynomial2D(1,),
models.Gaussian2D() - models.Polynomial2D(1,),
models.Gaussian2D() * models.Polynomial2D(1,),
models.Identity(2) | models.Polynomial2D(1),
models.Mapping((1,)) | models.Polynomial1D(1)])
def test_call_keyword_args_3(model):
"""
Test calling a model with positional, keyword and a mixture of both arguments.
"""
positional = model(1, 2)
model.inputs = ('r', 't')
assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
with pytest.raises(ValueError):
model(1, 2, 3)
with pytest.raises(ValueError):
model()
with pytest.raises(ValueError):
model(1, 2, t=12, r=3)
@pytest.mark.parametrize('model',
[models.Identity(2), models.Mapping((0, 1)),
models.Mapping((1,))])
def test_call_keyword_mappings(model):
"""
Test calling a model with positional, keyword and a mixture of both arguments.
"""
positional = model(1, 2)
assert_allclose(positional, model(x0=1, x1=2))
assert_allclose(positional, model(1, x1=2))
# We take a copy before modifying the model since otherwise this changes
# the instance used in the parametrize call and affects future test runs.
model = model.copy()
model.inputs = ('r', 't')
assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
assert_allclose(positional, model(1, 2))
with pytest.raises(ValueError):
model(1, 2, 3)
with pytest.raises(ValueError):
model(1)
with pytest.raises(ValueError):
model(1, 2, t=12, r=3)
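# Minimal sketch (added for clarity): renaming inputs only changes the keyword
# names accepted by __call__; positional calls are unaffected. The model is
# copied first, mirroring the caution in test_call_keyword_mappings above.
def test_rename_inputs_sketch():
    model = models.Identity(2).copy()
    model.inputs = ('a', 'b')
    assert_allclose(model(1, 2), model(a=1, b=2))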
|
503e2f47910164fdae9960f8da53a7ed8743f21f6bbc3f40cbabe4b74cac3d61 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to using quantities/units on parameters of models.
"""
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import coordinates as coord
from astropy import units as u
from astropy.modeling.core import Fittable1DModel, InputParameterError
from astropy.modeling.models import Gaussian1D, Pix2Sky_TAN, RotateNative2Celestial, Rotation2D
from astropy.modeling.parameters import Parameter, ParameterDefinitionError
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
class BaseTestModel(Fittable1DModel):
@staticmethod
def evaluate(x, a):
return x
def test_parameter_quantity():
"""
Basic tests for initializing general models (that do not require units)
with parameters that have units attached.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
assert g.amplitude.value == 1.0
assert g.amplitude.unit is u.J
assert g.mean.value == 1.0
assert g.mean.unit is u.m
assert g.stddev.value == 0.1
assert g.stddev.unit is u.m
def test_parameter_set_quantity():
"""
Make sure that parameters that start off as quantities can be set to any
other quantity, regardless of whether the units of the new quantity are
compatible with the original ones.
We basically leave it up to the evaluate method to raise errors if there
are issues with incompatible units, and we don't check for consistency
at the parameter level.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Try equivalent units
g.amplitude = 4 * u.kJ
assert_quantity_allclose(g.amplitude, 4 * u.kJ)
g.mean = 3 * u.km
assert_quantity_allclose(g.mean, 3 * u.km)
g.stddev = 2 * u.mm
assert_quantity_allclose(g.stddev, 2 * u.mm)
# Try different units
g.amplitude = 2 * u.s
assert_quantity_allclose(g.amplitude, 2 * u.s)
g.mean = 2 * u.Jy
assert_quantity_allclose(g.mean, 2 * u.Jy)
def test_parameter_lose_units():
"""
Check that parameters that have been set to a quantity that are then set to
a value with no units raise an exception. We do this because setting a
parameter to a value with no units is ambiguous if units were set before:
if a parameter is 1 * u.Jy and the parameter is then set to 2, does this mean
2 without units, or 2 * u.Jy?
"""
g = Gaussian1D(1 * u.Jy, 3, 0.1)
with pytest.raises(UnitsError) as exc:
g.amplitude = 2
assert exc.value.args[0] == ("The 'amplitude' parameter should be given as "
"a Quantity because it was originally "
"initialized as a Quantity")
def test_parameter_add_units():
"""
On the other hand, if starting from a parameter with no units, we should be
able to add units since this is unambiguous.
"""
g = Gaussian1D(1, 3, 0.1)
g.amplitude = 2 * u.Jy
assert_quantity_allclose(g.amplitude, 2 * u.Jy)
def test_parameter_change_unit():
"""
Test that changing the unit on a parameter does not work. This is an
ambiguous operation because it's not clear if it means that the value should
be converted or if the unit should be changed without conversion.
"""
g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)
# Setting a unit on a unitless parameter should not work
with pytest.raises(ValueError) as exc:
g.amplitude.unit = u.Jy
assert exc.value.args[0] == ("Cannot attach units to parameters that were "
"not initially specified with units")
# But changing to another unit should not work either, even if it is an equivalent unit
with pytest.raises(ValueError) as exc:
g.mean.unit = u.cm
assert exc.value.args[0] == ("Cannot change the unit attribute directly, "
"instead change the parameter to a new quantity")
def test_parameter_set_value():
"""
Test that changing the value on a parameter works as expected.
"""
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
# To set a parameter to a quantity, we simply do
g.amplitude = 2 * u.Jy
# If we try setting the value, we need to pass a non-quantity value
# TODO: determine whether this is the desired behavior?
g.amplitude.value = 4
assert_quantity_allclose(g.amplitude, 4 * u.Jy)
assert g.amplitude.value == 4
assert g.amplitude.unit is u.Jy
# If we try setting it to a Quantity, we raise an error
with pytest.raises(TypeError) as exc:
g.amplitude.value = 3 * u.Jy
assert exc.value.args[0] == (
"The .value property on parameters should be set"
" to unitless values, not Quantity objects. To set"
"a parameter to a quantity simply set the "
"parameter directly without using .value"
)
def test_parameter_quantity_property():
"""
Test that the quantity property of Parameters behaves as expected
"""
# Since parameters have .value and .unit attributes that return just the
# value and unit respectively, there is also a .quantity attribute that
# returns a full Quantity instance.
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy)
# Setting a parameter to a quantity changes both the value and the unit
g.amplitude.quantity = 5 * u.mJy
assert g.amplitude.value == 5
assert g.amplitude.unit is u.mJy
# And we can also set the parameter to a value with different units
g.amplitude.quantity = 4 * u.s
assert g.amplitude.value == 4
assert g.amplitude.unit is u.s
# But not to a value without units
with pytest.raises(TypeError) as exc:
g.amplitude.quantity = 3
assert exc.value.args[0] == "The .quantity attribute should be set to a Quantity object"
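# Minimal sketch (added for clarity): to convert rather than replace the unit,
# one can round-trip through the .quantity property with Quantity.to; this
# illustration uses only the public behavior exercised above.
def test_parameter_quantity_conversion_sketch():
    g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
    g.amplitude.quantity = g.amplitude.quantity.to(u.mJy)
    assert g.amplitude.unit is u.mJy
    assert_quantity_allclose(g.amplitude.quantity, 1000 * u.mJy)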
def test_parameter_default_units_match():
# If the explicit unit and the unit of the default quantity differ, raise an error
with pytest.raises(ParameterDefinitionError) as exc:
class TestC(Fittable1DModel):
a = Parameter(default=1.0 * u.m, unit=u.Jy)
assert exc.value.args[0] == ("parameter default 1.0 m does not have units "
"equivalent to the required unit Jy")
@pytest.mark.parametrize(('unit', 'default'), ((u.m, 1.0), (None, 1 * u.m)))
def test_parameter_defaults(unit, default):
"""
Test that default quantities are correctly taken into account
"""
class TestModel(BaseTestModel):
a = Parameter(default=default, unit=unit)
# TODO: decide whether the default property should return a value or
# a quantity?
# The default unit and value should be set on the class
assert TestModel.a.unit == u.m
assert TestModel.a.default == 1.0
# Check that the default unit and value are also set on a class instance
m = TestModel()
assert m.a.unit == u.m
assert m.a.default == m.a.value == 1.0
# If the parameter is set to a different value, the default is still the
# internal default
m = TestModel(2.0 * u.m)
assert m.a.unit == u.m
assert m.a.value == 2.0
assert m.a.default == 1.0
# Instantiate with a different, but compatible unit
m = TestModel(2.0 * u.pc)
assert m.a.unit == u.pc
assert m.a.value == 2.0
# The default is still in the original units
# TODO: but how do we know what those units are if we don't return a
# quantity?
assert m.a.default == 1.0
# Initialize with a completely different unit
m = TestModel(2.0 * u.Jy)
assert m.a.unit == u.Jy
assert m.a.value == 2.0
# TODO: this illustrates why the default doesn't make sense anymore
assert m.a.default == 1.0
# Instantiating without units, however, raises an error since the parameter has units
with pytest.raises(InputParameterError) as exc:
TestModel(1.0)
assert exc.value.args[0] == ("TestModel.__init__() requires a "
"Quantity for parameter 'a'")
def test_parameter_quantity_arithmetic():
"""
Test that arithmetic operations with properties that have units return the
appropriate Quantities.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Addition should work if units are compatible
assert g.mean + (1 * u.m) == 2 * u.m
assert (1 * u.m) + g.mean == 2 * u.m
# Multiplication by a scalar should also preserve the quantity-ness
assert g.mean * 2 == (2 * u.m)
assert 2 * g.mean == (2 * u.m)
# Multiplication by a quantity should result in units being multiplied
assert g.mean * (2 * u.m) == (2 * (u.m ** 2))
assert (2 * u.m) * g.mean == (2 * (u.m ** 2))
# Negation should work properly too
assert -g.mean == (-1 * u.m)
assert abs(-g.mean) == g.mean
# However, adding a unitless scalar to a quantity should not work
with pytest.raises(UnitsError) as exc:
g.mean + 1
assert exc.value.args[0] == ("Can only apply 'add' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
1 + g.mean
assert exc.value.args[0] == ("Can only apply 'add' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
def test_parameter_quantity_comparison():
"""
Basic test of comparison operations on properties with units.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Essentially here we are checking that parameters behave like Quantity
assert g.mean == 1 * u.m
assert 1 * u.m == g.mean
assert g.mean != 1
assert 1 != g.mean
assert g.mean < 2 * u.m
assert 2 * u.m > g.mean
with pytest.raises(UnitsError) as exc:
g.mean < 2
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
2 > g.mean
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)
assert np.all(g.mean == [1, 2] * u.m)
assert np.all([1, 2] * u.m == g.mean)
assert np.all(g.mean != [1, 2])
assert np.all([1, 2] != g.mean)
with pytest.raises(UnitsError) as exc:
g.mean < [3, 4]
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
[3, 4] > g.mean
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
def test_parameters_compound_models():
Pix2Sky_TAN()
sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg)
lon_pole = 180 * u.deg
n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole)
rot = Rotation2D(23)
rot | n2c
|
f5e009f222629e65c08c66143612df784872d7bda835214893fb24407c2a7e67 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for polynomial models."""
# pylint: disable=invalid-name
import os
import unittest.mock as mk
import warnings
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import conf, wcs
from astropy.io import fits
from astropy.modeling import fitting
from astropy.modeling.functional_models import Linear1D
from astropy.modeling.mappings import Identity
from astropy.modeling.polynomial import (
SIP, Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre1D, Legendre2D,
OrthoPolynomialBase, Polynomial1D, Polynomial2D, PolynomialBase)
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
linear1d = {
Chebyshev1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Hermite1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Legendre1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Polynomial1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Linear1D: {
'args': (),
'kwargs': {},
'parameters': {'intercept': 1.2, 'slope': 23.1},
'constraints': {'fixed': {'intercept': True}}
}
}
linear2d = {
Chebyshev2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Hermite2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Legendre2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Polynomial2D: {
'args': (1,),
'kwargs': {},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3},
'constraints': {'fixed': {'c0_0': True}}
}
}
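# Minimal usage sketch (added for clarity): each fixture entry above expands
# into a model instance like so, with 'parameters' passed as keyword
# arguments; the fitting tests below follow the same pattern.
def test_fixture_expansion_sketch():
    spec = linear1d[Polynomial1D]
    kwargs = {**spec['kwargs'], **spec['parameters']}
    model = Polynomial1D(*spec['args'], **kwargs)
    assert model.c0.value == spec['parameters']['c0']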
fitters = [fitting.LevMarLSQFitter, fitting.TRFLSQFitter, fitting.LMLSQFitter,
fitting.DogBoxLSQFitter]
@pytest.mark.skipif('not HAS_SCIPY')
class TestFitting:
"""Test linear fitter with polynomial models."""
def setup_class(self):
self.N = 100
self.M = 100
self.x1 = np.linspace(1, 10, 100)
self.y2, self.x2 = np.mgrid[:100, :83]
rsn = np.random.default_rng(0)
self.n1 = rsn.standard_normal(self.x1.size) * .1
self.n2 = rsn.standard_normal(self.x2.size)
self.n2.shape = self.x2.shape
self.linear_fitter = fitting.LinearLSQFitter()
# TODO: Most of these test cases have some pretty repetitive setup that we
# could probably factor out
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message=r'The fit may be poorly conditioned',
category=AstropyUserWarning)
model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
# For the constraints tests we're not checking the overall fit,
# just that the constraint was maintained
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
@pytest.mark.parametrize('fitter', fitters)
def test_non_linear_fitter_1D(self, model_class, constraints, fitter):
"""Test fitting with non-linear LevMarLSQFitter"""
fitter = fitter()
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
model_nlin = fitter(model, self.x1, y1 + self.n1)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message=r'The fit may be poorly conditioned',
category=AstropyUserWarning)
model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
@pytest.mark.parametrize('fitter', fitters)
def test_non_linear_fitter_2D(self, model_class, constraints, fitter):
"""Test fitting with non-linear LevMarLSQFitter"""
fitter = fitter()
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
model_nlin = fitter(model, self.x2, self.y2, z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize('model_class',
[cls for cls in list(linear1d) + list(linear2d)])
def test_polynomial_init_with_constraints(model_class):
"""
Test that polynomial models can be instantiated with constraints, but no
parameters specified.
Regression test for https://github.com/astropy/astropy/issues/3606
"""
# Just determine which parameter to place a constraint on; it doesn't
# matter which one it is, so long as it's a valid parameter for the model
if '1D' in model_class.__name__:
param = 'c0'
else:
param = 'c0_0'
if issubclass(model_class, Linear1D):
param = 'intercept'
if issubclass(model_class, OrthoPolynomialBase):
degree = (2, 2)
else:
degree = (2,)
m = model_class(*degree, fixed={param: True})
assert m.fixed[param] is True
assert getattr(m, param).fixed is True
if issubclass(model_class, OrthoPolynomialBase):
assert repr(m) == (f"<{model_class.__name__}(2, 2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., "
"c1_1=0., c2_1=0., c0_2=0., c1_2=0., c2_2=0.)>")
assert str(m) == (
f"Model: {model_class.__name__}\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"X_Degree: 2\n"
"Y_Degree: 2\n"
"Parameters:\n"
" c0_0 c1_0 c2_0 c0_1 c1_1 c2_1 c0_2 c1_2 c2_2\n"
" ---- ---- ---- ---- ---- ---- ---- ---- ----\n"
" 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0"
)
else:
if model_class.__name__ == 'Polynomial2D':
assert repr(m) == ("<Polynomial2D(2, c0_0=0., c1_0=0., c2_0=0., "
"c0_1=0., c0_2=0., c1_1=0.)>")
assert str(m) == (
"Model: Polynomial2D\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"Degree: 2\n"
"Parameters:\n"
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1\n"
" ---- ---- ---- ---- ---- ----\n"
" 0.0 0.0 0.0 0.0 0.0 0.0"
)
elif model_class.__name__ == 'Linear1D':
assert repr(m) == "<Linear1D(slope=2., intercept=0.)>"
assert str(m) == (
"Model: Linear1D\n"
"Inputs: ('x',)\n"
"Outputs: ('y',)\n"
"Model set size: 1\n"
"Parameters:\n"
" slope intercept\n"
" ----- ---------\n"
" 2.0 0.0"
)
else:
assert repr(m) == f"<{model_class.__name__}(2, c0=0., c1=0., c2=0.)>"
assert str(m) == (
f"Model: {model_class.__name__}\n"
"Inputs: ('x',)\n"
"Outputs: ('y',)\n"
"Model set size: 1\n"
"Degree: 2\n"
"Parameters:\n"
" c0 c1 c2\n"
" --- --- ---\n"
" 0.0 0.0 0.0"
)
def test_sip_hst():
"""Test SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'hst_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)
coords = [1, 1]
rel_coords = [1 - crpix1, 1 - crpix2]
astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
assert_allclose(sip(1, 1), astwcs_result)
# Test changing the inputs and calling the model with keyword arguments.
sip.inputs = ("r", "t")
assert_allclose(sip(r=1, t=1), astwcs_result)
assert_allclose(sip(1, t=1), astwcs_result)
# Test representations
assert repr(sip) == (
"<SIP([<Shift(offset=-2048.)>, <Shift(offset=-1024.)>, "
"<_SIP1D(4, 'A', A_2_0=0.00000855, A_3_0=-0., A_4_0=0., A_0_2=0.00000217, "
"A_0_3=0., A_0_4=0., A_1_1=-0.0000052, A_1_2=-0., A_1_3=-0., "
"A_2_1=-0., A_2_2=0., A_3_1=0.)>, "
"<_SIP1D(4, 'B', B_2_0=-0.00000175, B_3_0=0., B_4_0=-0., B_0_2=-0.00000722, "
"B_0_3=-0., B_0_4=-0., B_1_1=0.00000618, B_1_2=-0., B_1_3=0., "
"B_2_1=-0., B_2_2=-0., B_3_1=-0.)>])>"
)
with conf.set_temp('max_width', 80):
assert str(sip) == (
"Model: SIP\n"
" Model: Shift\n"
" Inputs: ('x',)\n"
" Outputs: ('y',)\n"
" Model set size: 1\n"
" Parameters:\n"
" offset\n"
" -------\n"
" -2048.0\n"
"\n"
" Model: Shift\n"
" Inputs: ('x',)\n"
" Outputs: ('y',)\n"
" Model set size: 1\n"
" Parameters:\n"
" offset\n"
" -------\n"
" -1024.0\n"
"\n"
" Model: _SIP1D\n"
" Inputs: ('x', 'y')\n"
" Outputs: ('z',)\n"
" Model set size: 1\n"
" Order: 4\n"
" Coeff. Prefix: A\n"
" Parameters:\n"
" A_2_0 A_3_0 ... A_3_1 \n"
" --------------------- ---------------------- ... ---------------------\n"
" 8.551277582556502e-06 -4.730444829222791e-10 ... 1.971022971660309e-15\n"
"\n"
" Model: _SIP1D\n"
" Inputs: ('x', 'y')\n"
" Outputs: ('z',)\n"
" Model set size: 1\n"
" Order: 4\n"
" Coeff. Prefix: B\n"
" Parameters:\n"
" B_2_0 B_3_0 ... B_3_1 \n"
" ---------------------- --------------------- ... ----------------------\n"
" -1.746491877058669e-06 8.567635427816317e-11 ... -3.779506805487476e-15\n"
)
# Test getting the number of coefficients
assert sip.sip1d_a.get_num_coeff(1) == 6
# Test error
message = "Degree of polynomial must be 2< deg < 9"
sip.sip1d_a.order = 1
with pytest.raises(ValueError) as err:
sip.sip1d_a.get_num_coeff(1)
assert str(err.value) == message
sip.sip1d_a.order = 10
with pytest.raises(ValueError) as err:
sip.sip1d_a.get_num_coeff(1)
assert str(err.value) == message
def test_sip_irac():
"""Test forward and inverse SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'irac_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
ap_pars = dict(**hdr['AP_*'])
bp_pars = dict(**hdr['BP_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
ap_order = ap_pars.pop('AP_ORDER')
bp_order = bp_pars.pop('BP_ORDER')
del a_pars['A_DMAX']
del b_pars['B_DMAX']
pix = [200, 200]
rel_pix = [200 - crpix1, 200 - crpix2]
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars,
ap_order=ap_order, ap_coeff=ap_pars, bp_order=bp_order,
bp_coeff=bp_pars)
foc = wobj.sip_pix2foc([pix], 1)
newpix = wobj.sip_foc2pix(foc, 1)[0]
assert_allclose(sip(*pix), foc[0] - rel_pix)
assert_allclose(sip.inverse(*foc[0]) +
foc[0] - rel_pix, newpix - pix)
# Test inverse representations
assert repr(sip.inverse) == (
"<InverseSIP([<Polynomial2D(2, c0_0=0., c1_0=0.0000114, c2_0=0.00002353, "
"c0_1=-0.00000546, c0_2=-0.00000667, c1_1=-0.00001801)>, "
"<Polynomial2D(2, c0_0=0., c1_0=-0.00001495, c2_0=0.00000122, c0_1=0.00001975, "
"c0_2=-0.00002601, c1_1=0.00002944)>])>"
)
assert str(sip.inverse) == (
"Model: InverseSIP\n"
" Model: Polynomial2D\n"
" Inputs: ('x', 'y')\n"
" Outputs: ('z',)\n"
" Model set size: 1\n"
" Degree: 2\n"
" Parameters:\n"
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n"
" ---- -------- --------- ---------- ---------- ----------\n"
" 0.0 1.14e-05 2.353e-05 -5.463e-06 -6.666e-06 -1.801e-05\n"
"\n"
" Model: Polynomial2D\n"
" Inputs: ('x', 'y')\n"
" Outputs: ('z',)\n"
" Model set size: 1\n"
" Degree: 2\n"
" Parameters:\n"
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n"
" ---- ---------- --------- --------- ---------- ---------\n"
" 0.0 -1.495e-05 1.225e-06 1.975e-05 -2.601e-05 2.944e-05\n"
)
def test_sip_no_coeff():
sip = SIP([10, 12], 2, 2)
assert_allclose(sip.sip1d_a.parameters, [0., 0., 0])
assert_allclose(sip.sip1d_b.parameters, [0., 0., 0])
with pytest.raises(NotImplementedError):
sip.inverse
# Test model set
sip = SIP([10, 12], 2, 2, n_models=2)
assert sip.sip1d_a.model_set_axis == 0
assert sip.sip1d_b.model_set_axis == 0
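# Minimal sketch (added for clarity): with all distortion coefficients left at
# their zero defaults, the SIP correction vanishes for any pixel position.
def test_sip_zero_coeff_evaluation_sketch():
    sip = SIP([10, 12], 2, 2)
    assert_allclose(sip(10, 12), [0, 0], atol=1e-12)
    assert_allclose(sip(100, 200), [0, 0], atol=1e-12)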
@pytest.mark.parametrize('cls', (Polynomial1D, Chebyshev1D, Legendre1D,
Polynomial2D, Chebyshev2D, Legendre2D))
def test_zero_degree_polynomial(cls):
"""
A few tests that degree=0 polynomials are correctly evaluated and
fitted.
Regression test for https://github.com/astropy/astropy/pull/3589
"""
message = "Degree of polynomial must be positive or null"
if cls.n_inputs == 1: # Test 1D polynomials
p1 = cls(degree=0, c0=1)
assert p1(0) == 1
assert np.all(p1(np.zeros(5)) == np.ones(5))
x = np.linspace(0, 1, 100)
# Add a little noise along a straight line
y = 1 + np.random.uniform(0, 0.1, len(x))
p1_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p1_fit = fitter(p1_init, x, y)
# The fit won't be exact of course, but it should get close to within
# 10%
assert_allclose(p1_fit.c0, 1, atol=0.10)
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(degree=-1)
assert str(err.value) == message
elif cls.n_inputs == 2: # Test 2D polynomials
if issubclass(cls, OrthoPolynomialBase):
p2 = cls(x_degree=0, y_degree=0, c0_0=1)
# different shaped x and y inputs
a = np.array([1, 2, 3])
b = np.array([1, 2])
with mk.patch.object(PolynomialBase, 'prepare_inputs', autospec=True,
return_value=((a, b), mk.MagicMock())):
with pytest.raises(ValueError) as err:
p2.prepare_inputs(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "Expected input arrays to have the same shape"
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(x_degree=-1, y_degree=0)
assert str(err.value) == message
with pytest.raises(ValueError) as err:
cls(x_degree=0, y_degree=-1)
assert str(err.value) == message
else:
p2 = cls(degree=0, c0_0=1)
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(degree=-1)
assert str(err.value) == message
assert p2(0, 0) == 1
assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))
y, x = np.mgrid[0:1:100j, 0:1:100j]
z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)
if issubclass(cls, OrthoPolynomialBase):
p2_init = cls(x_degree=0, y_degree=0)
else:
p2_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p2_fit = fitter(p2_init, x, y, z)
assert_allclose(p2_fit.c0_0, 1, atol=0.10)
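# Minimal sketch (added for clarity): a degree-0 polynomial is a constant
# function, so evaluation ignores the inputs entirely.
def test_zero_degree_constant_sketch():
    p = Polynomial1D(degree=0, c0=3.5)
    assert_allclose(p(np.linspace(-10, 10, 7)), 3.5)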
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_2d_orthopolynomial_in_compound_model(fitter):
"""
Ensure that OrthoPolynomialBase (ie. Chebyshev2D & Legendre2D) models get
evaluated & fitted correctly when part of a compound model.
Regression test for https://github.com/astropy/astropy/pull/6085.
"""
fitter = fitter()
y, x = np.mgrid[0:5, 0:5]
z = x + y
simple_model = Chebyshev2D(2, 2)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
simple_fit = fitter(simple_model, x, y, z)
compound_model = Identity(2) | Chebyshev2D(2, 2)
compound_model.fittable = True
compound_model.linear = True
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
compound_fit = fitter(compound_model, x, y, z)
assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-11)
def test_Hermite1D_clenshaw():
model = Hermite1D(degree=2)
assert model.clenshaw(1, [3]) == 3
assert model.clenshaw(1, [3, 4]) == 11
assert model.clenshaw(1, [3, 4, 5]) == 21
assert model.clenshaw(1, [3, 4, 5, 6]) == -3
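# Note on the expected values above: they follow from the physicists' Hermite
# polynomials evaluated at x = 1, namely H0 = 1, H1 = 2, H2 = 2 and H3 = -4,
# so the last case is 3*1 + 4*2 + 5*2 + 6*(-4) = -3.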
def test__fcache():
model = OrthoPolynomialBase(x_degree=2, y_degree=2)
with pytest.raises(NotImplementedError) as err:
model._fcache(np.asanyarray(1), np.asanyarray(1))
assert str(err.value) == "Subclasses should implement this"
model = Hermite2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == {
0: np.asanyarray(1),
1: 2,
3: np.asanyarray(1),
4: 2,
2: 2.0,
5: -4.0
}
model = Legendre2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == {
0: np.asanyarray(1),
1: np.asanyarray(1),
2: 1.0,
3: np.asanyarray(1),
4: np.asanyarray(1),
5: 1.0
}
model = Chebyshev2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) == {
0: np.asanyarray(1),
1: np.asanyarray(1),
2: 1.0,
3: np.asanyarray(1),
4: np.asanyarray(1),
5: 1.0
}
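# Note on the cache layouts asserted above: keys 0..x_degree hold the
# x-direction basis values and the remaining keys the y-direction ones. For
# Hermite2D the y-direction recurrence indexes by the flat cache key rather
# than the polynomial order, which is why key 5 works out to -4.0 instead of
# H2(1) = 2.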
def test_fit_deriv_shape_error():
model = Hermite2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError) as err:
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
assert str(err.value) == "x and y must have the same shape"
model = Chebyshev2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError) as err:
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
assert str(err.value) == "x and y must have the same shape"
model = Legendre2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError) as err:
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
assert str(err.value) == "x and y must have the same shape"
model = Polynomial2D(degree=2)
with pytest.raises(ValueError) as err:
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
assert str(err.value) == "Expected x and y to be of equal size"
|
478ff40b80cac489a38daed054cc55fa5ad7245783be8dcfd1d0c866eb235bdb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import unittest.mock as mk
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import SpectralCoord
from astropy.modeling.bounding_box import (
CompoundBoundingBox, ModelBoundingBox, _BaseInterval, _BaseSelectorArgument, _BoundingDomain,
_ignored_interval, _Interval, _SelectorArgument, _SelectorArguments)
from astropy.modeling.core import Model, fix_inputs
from astropy.modeling.models import Gaussian1D, Gaussian2D, Identity, Polynomial2D, Scale, Shift
class Test_Interval:
def test_create(self):
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
assert isinstance(interval, _BaseInterval)
assert interval.lower == lower
assert interval.upper == upper
assert interval == (lower, upper)
assert interval.__repr__() == f"Interval(lower={lower}, upper={upper})"
def test_copy(self):
interval = _Interval(0.5, 1.5)
copy = interval.copy()
assert interval == copy
assert id(interval) != id(copy)
# Same float values will have the same id
assert interval.lower == copy.lower
assert id(interval.lower) == id(copy.lower)
# Same float values will have the same id
assert interval.upper == copy.upper
assert id(interval.upper) == id(copy.upper)
def test__validate_shape(self):
message = "An interval must be some sort of sequence of length 2"
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
# Passes (2,)
interval._validate_shape((1, 2))
interval._validate_shape([1, 2])
interval._validate_shape((1*u.m, 2*u.m))
interval._validate_shape([1*u.m, 2*u.m])
# Passes (1, 2)
interval._validate_shape(((1, 2),))
interval._validate_shape(([1, 2],))
interval._validate_shape([(1, 2)])
interval._validate_shape([[1, 2]])
interval._validate_shape(((1*u.m, 2*u.m),))
interval._validate_shape(([1*u.m, 2*u.m],))
interval._validate_shape([(1*u.m, 2*u.m)])
interval._validate_shape([[1*u.m, 2*u.m]])
# Passes (2, 0)
interval._validate_shape((mk.MagicMock(), mk.MagicMock()))
interval._validate_shape([mk.MagicMock(), mk.MagicMock()])
# Passes with array inputs:
interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
interval._validate_shape((np.array([-2.5, -3.5, -4.5]),
np.array([2.5, 3.5, 4.5])))
# Fails shape (no units)
with pytest.raises(ValueError) as err:
interval._validate_shape((1, 2, 3))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([[1, 2, 3], [4, 5, 6]])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape(1)
assert str(err.value) == message
# Fails shape (units)
message = "An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError) as err:
interval._validate_shape((1*u.m, 2*u.m, 3*u.m))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([1*u.m, 2*u.m, 3*u.m])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([[1*u.m, 2*u.m, 3*u.m], [4*u.m, 5*u.m, 6*u.m]])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape(1*u.m)
assert str(err.value) == message
# Fails shape (arrays):
with pytest.raises(ValueError) as err:
interval._validate_shape((np.array([-2.5, -3.5]),
np.array([2.5, 3.5]),
np.array([3, 4])))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5]))
assert str(err.value) == message
def test__validate_bounds(self):
# Passes
assert _Interval._validate_bounds(1, 2) == (1, 2)
assert _Interval._validate_bounds(1*u.m, 2*u.m) == (1*u.m, 2*u.m)
interval = _Interval._validate_bounds(np.array([-2.5, -3.5]), np.array([2.5, 3.5]))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
# Fails
with pytest.warns(RuntimeWarning,
match="Invalid interval: upper bound 1 is strictly "
r"less than lower bound 2\."):
_Interval._validate_bounds(2, 1)
with pytest.warns(RuntimeWarning,
match=r"Invalid interval: upper bound 1\.0 m is strictly "
r"less than lower bound 2\.0 m\."):
_Interval._validate_bounds(2*u.m, 1*u.m)
def test_validate(self):
# Passes
assert _Interval.validate((1, 2)) == (1, 2)
assert _Interval.validate([1, 2]) == (1, 2)
assert _Interval.validate((1*u.m, 2*u.m)) == (1*u.m, 2*u.m)
assert _Interval.validate([1*u.m, 2*u.m]) == (1*u.m, 2*u.m)
assert _Interval.validate(((1, 2),)) == (1, 2)
assert _Interval.validate(([1, 2],)) == (1, 2)
assert _Interval.validate([(1, 2)]) == (1, 2)
assert _Interval.validate([[1, 2]]) == (1, 2)
assert _Interval.validate(((1*u.m, 2*u.m),)) == (1*u.m, 2*u.m)
assert _Interval.validate(([1*u.m, 2*u.m],)) == (1*u.m, 2*u.m)
assert _Interval.validate([(1*u.m, 2*u.m)]) == (1*u.m, 2*u.m)
assert _Interval.validate([[1*u.m, 2*u.m]]) == (1*u.m, 2*u.m)
interval = _Interval.validate((np.array([-2.5, -3.5]),
np.array([2.5, 3.5])))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
interval = _Interval.validate((np.array([-2.5, -3.5, -4.5]),
np.array([2.5, 3.5, 4.5])))
assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all()
assert (interval.upper == np.array([2.5, 3.5, 4.5])).all()
# Fail shape
with pytest.raises(ValueError):
_Interval.validate((1, 2, 3))
# Fail bounds
with pytest.warns(RuntimeWarning):
_Interval.validate((2, 1))
def test_outside(self):
interval = _Interval.validate((0, 1))
assert (interval.outside(np.linspace(-1, 2, 13)) ==
[True, True, True, True,
False, False, False, False, False,
True, True, True, True]).all()
def test_domain(self):
interval = _Interval.validate((0, 1))
assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()
def test__ignored_interval(self):
assert _ignored_interval.lower == -np.inf
assert _ignored_interval.upper == np.inf
for num in [0, -1, -100, 3.14, 10**100, -10**100]:
assert not num < _ignored_interval[0]
assert num > _ignored_interval[0]
assert not num > _ignored_interval[1]
assert num < _ignored_interval[1]
assert not (_ignored_interval.outside(np.array([num]))).all()
def test_validate_with_SpectralCoord(self):
"""Regression test for issue #12439"""
lower = SpectralCoord(1, u.um)
upper = SpectralCoord(10, u.um)
interval = _Interval.validate((lower, upper))
assert interval.lower == lower
assert interval.upper == upper
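# Minimal usage sketch (added for clarity): _Interval normalizes sequence
# input and exposes the containment helpers used throughout the tests above.
def test_interval_usage_sketch():
    interval = _Interval.validate((0, 1))
    assert interval.lower == 0 and interval.upper == 1
    outside = interval.outside(np.array([-0.5, 0.5, 1.5]))
    assert outside.tolist() == [True, False, True]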
class Test_BoundingDomain:
def setup(self):
class BoundingDomain(_BoundingDomain):
def fix_inputs(self, model, fix_inputs):
super().fix_inputs(model, fixed_inputs=fix_inputs)
def prepare_inputs(self, input_shape, inputs):
super().prepare_inputs(input_shape, inputs)
self.BoundingDomain = BoundingDomain
def test_create(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'C'
bounding_box = self.BoundingDomain(model, order='F')
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'F'
bounding_box = self.BoundingDomain(Gaussian2D(), ['x'])
assert bounding_box._ignored == [0]
assert bounding_box._order == 'C'
# Error
with pytest.raises(ValueError):
self.BoundingDomain(model, order=mk.MagicMock())
def test_model(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box.model == model
def test_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock(), order='C')
assert bounding_box._order == 'C'
assert bounding_box.order == 'C'
bounding_box = self.BoundingDomain(mk.MagicMock(), order='F')
assert bounding_box._order == 'F'
assert bounding_box.order == 'F'
bounding_box._order = 'test'
assert bounding_box.order == 'test'
def test_ignored(self):
ignored = [0]
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = self.BoundingDomain(model, ignored=ignored)
assert bounding_box._ignored == ignored
assert bounding_box.ignored == ignored
def test__get_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Success (default 'C')
assert bounding_box._order == 'C'
assert bounding_box._get_order() == 'C'
assert bounding_box._get_order('C') == 'C'
assert bounding_box._get_order('F') == 'F'
# Success (default 'F')
bounding_box._order = 'F'
assert bounding_box._order == 'F'
assert bounding_box._get_order() == 'F'
assert bounding_box._get_order('C') == 'C'
assert bounding_box._get_order('F') == 'F'
# Error
order = mk.MagicMock()
with pytest.raises(ValueError) as err:
bounding_box._get_order(order)
assert str(err.value) == ("order must be either 'C' (C/python order) or "
f"'F' (Fortran/mathematical order), got: {order}.")
def test__get_index(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass input name
assert bounding_box._get_index('x') == 0
assert bounding_box._get_index('y') == 1
# Pass invalid input name
with pytest.raises(ValueError) as err:
bounding_box._get_index('z')
assert str(err.value) == "'z' is not one of the inputs: ('x', 'y')."
# Pass valid index
assert bounding_box._get_index(0) == 0
assert bounding_box._get_index(1) == 1
assert bounding_box._get_index(np.int32(0)) == 0
assert bounding_box._get_index(np.int32(1)) == 1
assert bounding_box._get_index(np.int64(0)) == 0
assert bounding_box._get_index(np.int64(1)) == 1
# Pass invalid index
MESSAGE = "Integer key: 2 must be non-negative and < 2."
with pytest.raises(IndexError) as err:
bounding_box._get_index(2)
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(np.int32(2))
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(np.int64(2))
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(-1)
assert str(err.value) == "Integer key: -1 must be non-negative and < 2."
# Pass invalid key
value = mk.MagicMock()
with pytest.raises(ValueError) as err:
bounding_box._get_index(value)
assert str(err.value) == f"Key value: {value} must be string or integer."
def test__get_name(self):
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = self.BoundingDomain(model)
index = mk.MagicMock()
name = mk.MagicMock()
model.inputs = mk.MagicMock()
model.inputs.__getitem__.return_value = name
assert bounding_box._get_name(index) == name
assert model.inputs.__getitem__.call_args_list == [mk.call(index)]
def test_ignored_inputs(self):
model = mk.MagicMock()
ignored = list(range(4, 8))
model.n_inputs = 8
model.inputs = [mk.MagicMock() for _ in range(8)]
bounding_box = self.BoundingDomain(model, ignored=ignored)
inputs = bounding_box.ignored_inputs
assert isinstance(inputs, list)
for index, _input in enumerate(inputs):
assert _input in model.inputs
assert model.inputs[index + 4] == _input
for index, _input in enumerate(model.inputs):
if _input in inputs:
assert inputs[index - 4] == _input
else:
assert index < 4
def test__validate_ignored(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass
assert bounding_box._validate_ignored(None) == []
assert bounding_box._validate_ignored(['x', 'y']) == [0, 1]
assert bounding_box._validate_ignored([0, 1]) == [0, 1]
assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]
# Fail
with pytest.raises(ValueError):
bounding_box._validate_ignored([mk.MagicMock()])
with pytest.raises(ValueError):
bounding_box._validate_ignored(['z'])
with pytest.raises(IndexError):
bounding_box._validate_ignored([3])
with pytest.raises(IndexError):
bounding_box._validate_ignored([np.int32(3)])
with pytest.raises(IndexError):
bounding_box._validate_ignored([np.int64(3)])
def test___call__(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
with pytest.raises(RuntimeError) as err:
bounding_box(*args, **kwargs)
assert str(err.value) == ("This bounding box is fixed by the model and does not have "
"adjustable parameters.")
def test_fix_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
model = mk.MagicMock()
fixed_inputs = mk.MagicMock()
with pytest.raises(NotImplementedError) as err:
bounding_box.fix_inputs(model, fixed_inputs)
assert str(err.value) == "This should be implemented by a child class."
def test__prepare_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
with pytest.raises(NotImplementedError) as err:
bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "This has not been implemented for BoundingDomain."
def test__base_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Simple shape
input_shape = (13,)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
# Complex shape
input_shape = (13, 7)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
def test__all_out_output(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
# Simple shape
model.n_outputs = 1
input_shape = (13,)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (1, 13)
assert output_unit is None
# Complex shape
model.n_outputs = 6
input_shape = (13, 7)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (6, 13, 7)
assert output_unit is None
def test__modify_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
# Simple shape
with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,
return_value=np.asanyarray(0)) as mkBase:
assert (np.array([1, 2, 3]) ==
bounding_box._modify_output([1, 2, 3], valid_index,
input_shape, fill_value)).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
# Replacement
with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,
return_value=np.array([1, 2, 3, 4, 5, 6])) as mkBase:
assert (np.array([7, 2, 8, 4, 9, 6]) ==
bounding_box._modify_output([7, 8, 9], np.array([[0, 2, 4]]),
input_shape, fill_value)).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
def test__prepare_outputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
valid_outputs = [mk.MagicMock() for _ in range(3)]
effects = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(_BoundingDomain, '_modify_output', autospec=True,
side_effect=effects) as mkModify:
assert effects == bounding_box._prepare_outputs(valid_outputs, valid_index,
input_shape, fill_value)
assert mkModify.call_args_list == [
mk.call(bounding_box, valid_outputs[idx], valid_index, input_shape, fill_value)
for idx in range(3)
]
def test_prepare_outputs(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
valid_outputs = mk.MagicMock()
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with mk.patch.object(_BoundingDomain, '_prepare_outputs', autospec=True) as mkPrepare:
# Reshape valid_outputs
model.n_outputs = 1
assert mkPrepare.return_value == bounding_box.prepare_outputs(valid_outputs,
valid_index,
input_shape,
fill_value)
assert mkPrepare.call_args_list == [
mk.call(bounding_box, [valid_outputs], valid_index, input_shape, fill_value)
]
mkPrepare.reset_mock()
# No reshape valid_outputs
model.n_outputs = 2
assert mkPrepare.return_value == bounding_box.prepare_outputs(valid_outputs,
valid_index,
input_shape,
fill_value)
assert mkPrepare.call_args_list == [
mk.call(bounding_box, valid_outputs, valid_index, input_shape, fill_value)
]
def test__get_valid_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Don't get unit
assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None
# Get unit from unitless
assert bounding_box._get_valid_outputs_unit(7, True) is None
# Get unit
assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m
def test__evaluate_model(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
valid_inputs = mk.MagicMock()
input_shape = mk.MagicMock()
valid_index = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
with mk.patch.object(_BoundingDomain, '_get_valid_outputs_unit',
autospec=True) as mkGet:
with mk.patch.object(_BoundingDomain, 'prepare_outputs',
autospec=True) as mkPrepare:
assert bounding_box._evaluate_model(evaluate, valid_inputs,
valid_index, input_shape,
fill_value, with_units) == (
mkPrepare.return_value,
mkGet.return_value
)
assert mkPrepare.call_args_list == [mk.call(bounding_box, evaluate.return_value,
valid_index, input_shape, fill_value)]
assert mkGet.call_args_list == [mk.call(evaluate.return_value, with_units)]
assert evaluate.call_args_list == [mk.call(valid_inputs)]
def test__evaluate(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
valid_inputs = mk.MagicMock()
valid_index = mk.MagicMock()
effects = [(valid_inputs, valid_index, True), (valid_inputs, valid_index, False)]
with mk.patch.object(self.BoundingDomain, 'prepare_inputs', autospec=True,
side_effect=effects) as mkPrepare:
with mk.patch.object(_BoundingDomain, '_all_out_output',
autospec=True) as mkAll:
with mk.patch.object(_BoundingDomain, '_evaluate_model',
autospec=True) as mkEvaluate:
# all_out
assert bounding_box._evaluate(evaluate, inputs, input_shape,
fill_value, with_units) == mkAll.return_value
assert mkAll.call_args_list == [mk.call(bounding_box, input_shape, fill_value)]
assert mkEvaluate.call_args_list == []
assert mkPrepare.call_args_list == [mk.call(bounding_box, input_shape, inputs)]
mkAll.reset_mock()
mkPrepare.reset_mock()
# not all_out
assert bounding_box._evaluate(evaluate, inputs, input_shape,
fill_value, with_units) == mkEvaluate.return_value
assert mkAll.call_args_list == []
assert mkEvaluate.call_args_list == [mk.call(bounding_box, evaluate,
valid_inputs, valid_index,
input_shape, fill_value,
with_units)]
assert mkPrepare.call_args_list == [mk.call(bounding_box, input_shape, inputs)]
def test__set_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# set no unit
assert 27 == bounding_box._set_outputs_unit(27, None)
# set unit
assert 27 * u.m == bounding_box._set_outputs_unit(27, u.m)
def test_evaluate(self):
bounding_box = self.BoundingDomain(Gaussian2D())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
fill_value = mk.MagicMock()
outputs = mk.MagicMock()
valid_outputs_unit = mk.MagicMock()
value = (outputs, valid_outputs_unit)
with mk.patch.object(_BoundingDomain, '_evaluate',
autospec=True, return_value=value) as mkEvaluate:
with mk.patch.object(_BoundingDomain, '_set_outputs_unit',
autospec=True) as mkSet:
with mk.patch.object(Model, 'input_shape', autospec=True) as mkShape:
with mk.patch.object(Model, 'bbox_with_units',
new_callable=mk.PropertyMock) as mkUnits:
assert tuple(mkSet.return_value) == bounding_box.evaluate(evaluate, inputs,
fill_value)
assert mkSet.call_args_list == [mk.call(outputs, valid_outputs_unit)]
assert mkEvaluate.call_args_list == [mk.call(bounding_box, evaluate, inputs,
mkShape.return_value,
fill_value,
mkUnits.return_value)]
assert mkShape.call_args_list == [mk.call(bounding_box._model, inputs)]
assert mkUnits.call_args_list == [mk.call()]
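# Minimal behavioral sketch (added for clarity): once a bounding box is set on
# a model, evaluating with with_bounding_box=True clips points outside it to
# the fill value.
def test_bounding_box_clip_sketch():
    model = Gaussian1D()
    model.bounding_box = (-1, 1)
    result = model(np.array([-2.0, 0.0, 2.0]), with_bounding_box=True,
                   fill_value=np.nan)
    assert np.isnan(result[0]) and np.isnan(result[2])
    assert result[1] == 1.0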
class TestModelBoundingBox:
def test_create(self):
intervals = ()
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'C'
# Set optional
intervals = {}
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model, order='F')
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'F'
# Set interval
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
# Set ignored
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 2
model.inputs = ['x', 'y']
bounding_box = ModelBoundingBox(intervals, model, ignored=[1])
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
assert bounding_box._ignored == [1]
intervals = ((1, 2), (3, 4))
model = mk.MagicMock()
model.n_inputs = 3
model.inputs = ['x', 'y', 'z']
bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order='F')
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)}
assert bounding_box._model == model
assert bounding_box._ignored == [2]
assert bounding_box._order == 'F'
def test_copy(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4)))
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
assert bounding_box.ignored == copy.ignored
assert id(bounding_box.ignored) != id(copy.ignored)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
# Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
# Check interval objects
for index, interval in bounding_box.intervals.items():
assert interval == copy.intervals[index]
assert id(interval) != id(copy.intervals[index])
# Same float values will have the same id
assert interval.lower == copy.intervals[index].lower
assert id(interval.lower) == id(copy.intervals[index].lower)
# Same float values will have the same id
assert interval.upper == copy.intervals[index].upper
assert id(interval.upper) == id(copy.intervals[index].upper)
assert len(bounding_box.intervals) == len(copy.intervals)
assert bounding_box.intervals.keys() == copy.intervals.keys()
def test_intervals(self):
intervals = {0: _Interval(1, 2)}
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = ModelBoundingBox(intervals, model)
assert bounding_box._intervals == intervals
assert bounding_box.intervals == intervals
def test_named_intervals(self):
intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)}
model = mk.MagicMock()
model.n_inputs = 4
model.inputs = [mk.MagicMock() for _ in range(4)]
bounding_box = ModelBoundingBox(intervals, model)
named = bounding_box.named_intervals
assert isinstance(named, dict)
for name, interval in named.items():
assert name in model.inputs
assert intervals[model.inputs.index(name)] == interval
for index, name in enumerate(model.inputs):
assert index in intervals
assert name in named
assert intervals[index] == named[name]
def test___repr__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.__repr__() == (
"ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" y: Interval(lower=-4, upper=4)\n"
" }\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
intervals = {0: _Interval(-1, 1)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])
assert bounding_box.__repr__() == (
"ModelBoundingBox(\n"
" intervals={\n"
" x: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['y']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
")"
)
def test___len__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert len(bounding_box) == 0 == len(bounding_box._intervals)
def test___contains__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Contains with keys
assert 'x' in bounding_box
assert 'y' in bounding_box
assert 'z' not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
# General not in
assert mk.MagicMock() not in bounding_box
# Contains with ignored
del bounding_box['y']
# Contains with keys
assert 'x' in bounding_box
assert 'y' in bounding_box
assert 'z' not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
def test___getitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Get using input key
assert bounding_box['x'] == (-1, 1)
assert bounding_box['y'] == (-4, 4)
# Fail with input key
with pytest.raises(ValueError):
bounding_box['z']
# Get using index
assert bounding_box[0] == (-1, 1)
assert bounding_box[1] == (-4, 4)
assert bounding_box[np.int32(0)] == (-1, 1)
assert bounding_box[np.int32(1)] == (-4, 4)
assert bounding_box[np.int64(0)] == (-1, 1)
assert bounding_box[np.int64(1)] == (-4, 4)
# Fail with index
with pytest.raises(IndexError):
bounding_box[2]
with pytest.raises(IndexError):
bounding_box[np.int32(2)]
with pytest.raises(IndexError):
bounding_box[np.int64(2)]
# Get ignored interval
del bounding_box[0]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == (-4, 4)
del bounding_box[1]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == _ignored_interval
def test_bounding_box(self):
# 0D
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x'])
assert bounding_box.bounding_box() == (-np.inf, np.inf)
assert bounding_box.bounding_box('C') == (-np.inf, np.inf)
assert bounding_box.bounding_box('F') == (-np.inf, np.inf)
# 1D
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == (-1, 1)
assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1)
# > 1D
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box('C') == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box('F') == ((-1, 1), (-4, 4))
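# Editorial sketch of the ordering contract asserted above: the tuple form
# of a multi-input bounding box lists intervals in reverse input order for
# 'C' (numpy convention, last axis varies fastest) and in input order for
# 'F'. Assuming the API exercised in these tests:
#
#   bbox = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))  # 'C'
#   bbox['x'], bbox['y']      # -> ((-1, 1), (-4, 4))
#   bbox.bounding_box('F')    # -> ((-1, 1), (-4, 4)), x's interval first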
def test___eq__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == bounding_box
assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == (-1, 1)
assert not (bounding_box == mk.MagicMock())
assert not (bounding_box == (-2, 2))
assert not (bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)}))
# Respect ordering
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box_1 = ModelBoundingBox.validate(model, intervals)
bounding_box_2 = ModelBoundingBox.validate(model, intervals, order='F')
assert bounding_box_1._order == 'C'
assert bounding_box_1 == ((-4, 4), (-1, 1))
assert not (bounding_box_1 == ((-1, 1), (-4, 4)))
assert bounding_box_2._order == 'F'
assert not (bounding_box_2 == ((-4, 4), (-1, 1)))
assert bounding_box_2 == ((-1, 1), (-4, 4))
assert bounding_box_1 == bounding_box_2
# Respect ignored
model = Gaussian2D()
bounding_box_1._ignored = [mk.MagicMock()]
bounding_box_2._ignored = [mk.MagicMock()]
assert bounding_box_1._ignored != bounding_box_2._ignored
assert not (bounding_box_1 == bounding_box_2)
def test__setitem__(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1])
assert bounding_box._ignored == [0, 1]
# USING Intervals directly
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box['x'] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box['x'], _Interval)
assert bounding_box['x'] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box['y'] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box['y'], _Interval)
assert bounding_box['y'] == (-4, 4)
del bounding_box['x']
del bounding_box['y']
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
del bounding_box[0]
del bounding_box[1]
# USING tuples
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box['x'] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box['x'], _Interval)
assert bounding_box['x'] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box['y'] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box['y'], _Interval)
assert bounding_box['y'] == (-4, 4)
del bounding_box['x']
del bounding_box['y']
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# USING Intervals directly
# Set interval using key
assert 'x' not in bounding_box
bounding_box['x'] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 'x' in bounding_box
assert isinstance(bounding_box['x'], _Interval)
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
# USING tuples
# Set interval using key
bounding_box._intervals = {}
assert 'x' not in bounding_box
bounding_box['x'] = (np.array([-1, -2]), np.array([1, 2]))
assert 'x' in bounding_box
assert isinstance(bounding_box['x'], _Interval)
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = (np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
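# Editorial sketch: __setitem__ accepts a name or an index as the key and
# either an _Interval or a plain (lower, upper) tuple as the value (tuples
# are validated into _Interval objects); setting an interval also removes
# the input from the ignored list. Assuming the API exercised above:
#
#   bbox = ModelBoundingBox.validate(Gaussian2D(), {}, ignored=['x', 'y'])
#   bbox['x'] = (-1, 1)           # name key, tuple coerced to _Interval
#   bbox[1] = _Interval(-4, 4)    # index key
#   bbox.ignored                  # -> []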
def test___delitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Using index
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert 0 in bounding_box
assert 'x' in bounding_box
del bounding_box[0]
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
assert 0 in bounding_box
assert 'x' in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError) as err:
del bounding_box[0]
assert str(err.value) == "Cannot delete ignored input: 0!"
# Using key
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert 0 in bounding_box
assert 'y' in bounding_box
del bounding_box['y']
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
assert 0 in bounding_box
assert 'y' in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError) as err:
del bounding_box['y']
assert str(err.value) == "Cannot delete ignored input: y!"
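# Editorial note with a sketch: deletion does not remove an input from the
# bounding box, it moves the input onto the ignored list, so membership
# checks still succeed and a second delete raises RuntimeError, as asserted
# above. Assuming the API exercised here:
#
#   bbox = ModelBoundingBox.validate(Gaussian2D(),
#                                    {0: _Interval(-1, 1), 1: _Interval(-4, 4)})
#   del bbox['x']
#   'x' in bbox     # -> True (ignored, not gone)
#   bbox['x']       # -> _ignored_interval, i.e. (-inf, inf)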
def test__validate_dict(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Input name keys
intervals = {'x': _Interval(-1, 1), 'y': _Interval(-4, 4)}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_dict(intervals)
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Input index
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# name keys
intervals = {'x': _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 'x' not in bounding_box
bounding_box._validate_dict(intervals)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# input index
bounding_box._intervals = {}
intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 0 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test__validate_sequence(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# C order
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='C')
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Fortran order
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Invalid order
bounding_box._intervals = {}
order = mk.MagicMock()
assert 'x' not in bounding_box
assert 'y' not in bounding_box
with pytest.raises(ValueError):
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=order)
assert 'x' not in bounding_box
assert 'y' not in bounding_box
assert len(bounding_box.intervals) == 0
def test__n_inputs(self):
model = Gaussian2D()
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box._n_inputs == 2
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])
assert bounding_box._n_inputs == 1
bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x', 'y'])
assert bounding_box._n_inputs == 0
bounding_box._ignored = ['x', 'y', 'z']
assert bounding_box._n_inputs == 0
def test__validate_iterable(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass with ignored
bounding_box._intervals = {}
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1)}
assert 0 not in bounding_box.intervals
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
# Invalid iterable
bounding_box._intervals = {}
bounding_box._ignored = []
assert 'x' not in bounding_box
assert 'y' not in bounding_box
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3)))
assert str(err.value) == "Found 3 intervals, but must have exactly 2."
assert len(bounding_box.intervals) == 0
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(intervals)
assert str(err.value) == "Found 2 intervals, but must have exactly 1."
assert len(bounding_box.intervals) == 0
bounding_box._ignored = []
intervals = {0: _Interval(-1, 1)}
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(intervals)
assert str(err.value) == "Found 1 intervals, but must have exactly 2."
assert 'x' not in bounding_box
assert 'y' not in bounding_box
assert len(bounding_box.intervals) == 0
def test__validate(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass single with ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox({}, model, ignored=[1])
assert 0 not in bounding_box.intervals
assert 1 not in bounding_box.intervals
bounding_box._validate(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert len(bounding_box.intervals) == 1
# Pass single
model = Gaussian1D()
bounding_box = ModelBoundingBox({}, model)
assert 'x' not in bounding_box
bounding_box._validate((-1, 1))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
sequence = (np.array([-1, -2]), np.array([1, 2]))
assert 'x' not in bounding_box
bounding_box._validate(sequence)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
def test_validate(self):
model = Gaussian2D()
kwargs = {'test': mk.MagicMock()}
# Pass sequence Default order
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), order='F', **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals, order='F', **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == 'F'
# Pass ModelBoundingBox
bbox = bounding_box
bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == 'F'
# Pass single ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'], **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == _ignored_interval
assert len(bounding_box.intervals) == 1
# Pass single
bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs)
assert (bounding_box._model.parameters == Gaussian1D().parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
sequence = (np.array([-1, -2]), np.array([1, 2]))
bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
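# Editorial sketch: validate() is the normalizing entry point used when a
# bounding box is attached to a model — it accepts a tuple, a dict keyed by
# name or index, or an existing ModelBoundingBox. Assuming the public
# astropy API, the everyday spelling is:
#
#   from astropy.modeling.models import Gaussian2D
#   model = Gaussian2D()
#   model.bounding_box = ((-4, 4), (-1, 1))   # validated into a ModelBoundingBox
#   model.bounding_box['x']                   # -> Interval(lower=-1, upper=1)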
def test_fix_inputs(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
# keep_ignored = False (default)
new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()})
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all()
assert 'x' in new_bounding_box
assert new_bounding_box['x'] == (-1, 1)
assert 'y' not in new_bounding_box
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == []
# keep_ignored = True
new_bounding_box = bounding_box.fix_inputs(Gaussian2D(), {1: mk.MagicMock()},
_keep_ignored=True)
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all()
assert 'x' in new_bounding_box
assert new_bounding_box['x'] == (-1, 1)
assert 'y' in new_bounding_box
assert 'y' in new_bounding_box.ignored_inputs
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == [1]
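# Editorial sketch: fix_inputs() above is the bounding-box half of input
# fixing; the public analogue (assuming astropy.modeling.fix_inputs
# propagates bounding boxes this way) is:
#
#   from astropy.modeling import fix_inputs
#   from astropy.modeling.models import Gaussian2D
#   model = Gaussian2D()
#   model.bounding_box = ((-4, 4), (-1, 1))
#   fixed = fix_inputs(model, {'y': 0.5})     # 1-input model; x keeps (-1, 1)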
def test_dimension(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert bounding_box.dimension == 0 == len(bounding_box._intervals)
def test_domain(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# test defaults
assert (np.array(bounding_box.domain(0.25)) ==
np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()
# test C order
assert (np.array(bounding_box.domain(0.25, 'C')) ==
np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()
# test Fortran order
assert (np.array(bounding_box.domain(0.25, 'F')) ==
np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)])).all()
# test error order
order = mk.MagicMock()
with pytest.raises(ValueError):
bounding_box.domain(0.25, order)
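# Editorial sketch: domain(resolution) samples each interval on a regular
# grid with the given step, returning one array per input in the requested
# order (default 'C'), mirroring the assertions above:
#
#   bbox = ModelBoundingBox.validate(Gaussian2D(),
#                                    {0: _Interval(-1, 1), 1: _Interval(0, 2)})
#   bbox.domain(0.25)         # [linspace(0, 2, 9), linspace(-1, 1, 9)]
#   bbox.domain(0.25, 'F')    # [linspace(-1, 1, 9), linspace(0, 2, 9)]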
def test__outside(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False for _ in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index ==
[True, True, True, True,
False, False, False, False, False,
True, True, True, True]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True for _ in range(13)]).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True]).all()
assert all_out and isinstance(all_out, bool)
def test__valid_index(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
def test_prepare_inputs(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) == np.array(inputs)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) ==
np.array(
[
[x[4], x[5], x[6], x[7], x[8]],
[y[4], y[5], y[6], y[7], y[8]],
]
)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all()
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
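# Editorial summary of the contract verified above, as a sketch (using the
# bounding_box built in test_prepare_inputs):
# prepare_inputs(input_shape, inputs) returns (valid_inputs, valid_index,
# all_out), where valid_inputs keeps only in-bounds samples, valid_index is
# the index array used to scatter results back into input_shape, and
# all_out short-circuits evaluation when nothing survives:
#
#   x = np.linspace(-2, 1, 13); y = np.linspace(0, 3, 13)
#   new, idx, all_out = bounding_box.prepare_inputs(x.shape, (x, y))
#   # new == (x[4:9], y[4:9]); idx[0] == [4, 5, 6, 7, 8]; all_out is False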
def test_bounding_box_ignore(self):
"""Regression test for #13028"""
bbox_x = ModelBoundingBox((9, 10), Polynomial2D(1), ignored=["x"])
assert bbox_x.ignored_inputs == ['x']
bbox_y = ModelBoundingBox((11, 12), Polynomial2D(1), ignored=["y"])
assert bbox_y.ignored_inputs == ['y']
class Test_SelectorArgument:
def test_create(self):
index = mk.MagicMock()
ignore = mk.MagicMock()
argument = _SelectorArgument(index, ignore)
assert isinstance(argument, _BaseSelectorArgument)
assert argument.index == index
assert argument.ignore == ignore
assert argument == (index, ignore)
def test_validate(self):
model = Gaussian2D()
# default integer
assert _SelectorArgument.validate(model, 0) == (0, True)
assert _SelectorArgument.validate(model, 1) == (1, True)
# default string
assert _SelectorArgument.validate(model, 'x') == (0, True)
assert _SelectorArgument.validate(model, 'y') == (1, True)
ignore = mk.MagicMock()
# non-default integer
assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)
# non-default string
assert _SelectorArgument.validate(model, 'x', ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 'y', ignore) == (1, ignore)
# Fail
with pytest.raises(ValueError):
_SelectorArgument.validate(model, 'z')
with pytest.raises(ValueError):
_SelectorArgument.validate(model, mk.MagicMock())
with pytest.raises(IndexError):
_SelectorArgument.validate(model, 2)
def test_get_selector(self):
# single inputs
inputs = [idx + 17 for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
# numpy array of single inputs
inputs = [np.array([idx + 11]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
inputs = [np.asanyarray(idx + 13) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
# multi entry numpy array
inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index,
mk.MagicMock()).get_selector(*inputs) == tuple(inputs[index])
def test_name(self):
model = Gaussian2D()
for index in range(model.n_inputs):
assert _SelectorArgument(index, mk.MagicMock()).name(model) == model.inputs[index]
def test_pretty_repr(self):
model = Gaussian2D()
assert _SelectorArgument(0, False).pretty_repr(model) == "Argument(name='x', ignore=False)"
assert _SelectorArgument(0, True).pretty_repr(model) == "Argument(name='x', ignore=True)"
assert _SelectorArgument(1, False).pretty_repr(model) == "Argument(name='y', ignore=False)"
assert _SelectorArgument(1, True).pretty_repr(model) == "Argument(name='y', ignore=True)"
def test_get_fixed_value(self):
model = Gaussian2D()
values = {0: 5, 'y': 7}
# Get index value
assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5
# Get name value
assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7
# Fail
values = {0: 5}
with pytest.raises(RuntimeError) as err:
_SelectorArgument(1, True).get_fixed_value(model, values)
assert str(err.value) == "Argument(name='y', ignore=True) was not found in {0: 5}"
def test_is_argument(self):
model = Gaussian2D()
argument = _SelectorArgument.validate(model, 0)
# Is true
assert argument.is_argument(model, 0) is True
assert argument.is_argument(model, 'x') is True
# Is false
assert argument.is_argument(model, 1) is False
assert argument.is_argument(model, 'y') is False
# Fail
with pytest.raises(ValueError):
argument.is_argument(model, 'z')
with pytest.raises(ValueError):
argument.is_argument(model, mk.MagicMock())
with pytest.raises(IndexError):
argument.is_argument(model, 2)
def test_named_tuple(self):
model = Gaussian2D()
for index in range(model.n_inputs):
ignore = mk.MagicMock()
assert _SelectorArgument(index, ignore).named_tuple(model) == (model.inputs[index],
ignore)
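# Editorial sketch: a _SelectorArgument is just an (input index, ignore
# flag) pair; validate() resolves a name or index against the model and
# defaults ignore to True, as asserted above:
#
#   arg = _SelectorArgument.validate(Gaussian2D(), 'y')
#   arg.index, arg.ignore       # -> (1, True)
#   arg.get_selector(3.0, 7.0)  # -> 7.0, picks input #1 out of the call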
class Test_SelectorArguments:
def test_create(self):
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == []
kept_ignore = mk.MagicMock()
arguments = _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, False)), kept_ignore)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == kept_ignore
def test_pretty_repr(self):
model = Gaussian2D()
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))
assert arguments.pretty_repr(model) == (
"SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" Argument(name='y', ignore=False)\n"
")"
)
def test_ignore(self):
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, True))).ignore == [0, 1]
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, True)), [13, 4]).ignore == [0, 1, 13, 4]
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, False))).ignore == [0]
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, True))).ignore == [1]
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, False))).ignore == []
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, False)), [17, 14]).ignore == [17, 14]
def test_validate(self):
# Integer key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Default ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), (1,)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, True))
assert arguments.kept_ignore == []
# String key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), (('x', True), ('y', False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Test kept_ignore option
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8])
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [11, 5, 8]
arguments._kept_ignore = [13, 17, 14]
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [13, 17, 14]
# Invalid, bad argument
with pytest.raises(ValueError):
_SelectorArguments.validate(Gaussian2D(), ((0, True), ('z', False)))
with pytest.raises(ValueError):
_SelectorArguments.validate(Gaussian2D(), ((mk.MagicMock(), True), (1, False)))
with pytest.raises(IndexError):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False)))
# Invalid, repeated argument
with pytest.raises(ValueError) as err:
_SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False)))
assert str(err.value) == "Input: 'x' has been repeated."
# Invalid, no arguments
with pytest.raises(ValueError) as err:
_SelectorArguments.validate(Gaussian2D(), ())
assert str(err.value) == "There must be at least one selector argument."
def test_get_selector(self):
inputs = [idx + 19 for idx in range(4)]
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),
(1, False))).get_selector(*inputs) == tuple(inputs[:2])
assert _SelectorArguments.validate(Gaussian2D(),
((1, True),
(0, False))).get_selector(*inputs) == tuple(inputs[:2][::-1]) # noqa: E501
assert _SelectorArguments.validate(Gaussian2D(),
((1, False),)).get_selector(*inputs) == (inputs[1],)
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),)).get_selector(*inputs) == (inputs[0],)
def test_is_selector(self):
# Is Selector
assert _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5, 2.5))
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector((0.5,))
# Is not selector
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5, 2.5, 3.5))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5,))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector(0.5)
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector((0.5, 2.5))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector(2.5)
def test_get_fixed_values(self):
model = Gaussian2D()
assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(
model, {0: 11, 1: 7}) == (11, 7)
assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(
model, {0: 5, 'y': 47}) == (5, 47)
assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(
model, {'x': 2, 'y': 9}) == (2, 9)
assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(
model, {'x': 12, 1: 19}) == (12, 19)
def test_is_argument(self):
model = Gaussian2D()
# Is true
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, 'x') is True
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, 'y') is True
# Is true and false
arguments = _SelectorArguments.validate(model, ((0, True),))
assert arguments.is_argument(model, 0) is True
assert arguments.is_argument(model, 'x') is True
assert arguments.is_argument(model, 1) is False
assert arguments.is_argument(model, 'y') is False
arguments = _SelectorArguments.validate(model, ((1, False),))
assert arguments.is_argument(model, 0) is False
assert arguments.is_argument(model, 'x') is False
assert arguments.is_argument(model, 1) is True
assert arguments.is_argument(model, 'y') is True
def test_selector_index(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.selector_index(model, 0) == 0
assert arguments.selector_index(model, 'x') == 0
assert arguments.selector_index(model, 1) == 1
assert arguments.selector_index(model, 'y') == 1
arguments = _SelectorArguments.validate(model, ((1, True), (0, False)))
assert arguments.selector_index(model, 0) == 1
assert arguments.selector_index(model, 'x') == 1
assert arguments.selector_index(model, 1) == 0
assert arguments.selector_index(model, 'y') == 0
# Error
arguments = _SelectorArguments.validate(model, ((0, True),))
with pytest.raises(ValueError) as err:
arguments.selector_index(model, 'y')
assert str(err.value) == "y does not correspond to any selector argument."
def test_add_ignore(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), ))
assert arguments == ((0, True),)
assert arguments._kept_ignore == []
new_arguments0 = arguments.add_ignore(model, 1)
assert new_arguments0 == arguments
assert new_arguments0._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments1 = new_arguments0.add_ignore(model, 'y')
assert new_arguments1 == arguments == new_arguments0
assert new_arguments0._kept_ignore == [1]
assert new_arguments1._kept_ignore == [1, 1]
assert arguments._kept_ignore == []
# Error
with pytest.raises(ValueError) as err:
arguments.add_ignore(model, 0)
assert str(err.value) == "0: is a selector argument and cannot be ignored."
def test_reduce(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
new_arguments = arguments.reduce(model, 0)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 'x')
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 1)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 'y')
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
def test_named_tuple(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.named_tuple(model) == (('x', True), ('y', False))
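# Editorial sketch: _SelectorArguments.validate normalizes a spec of
# name/index pairs (ignore defaults to True) into ordered (index, ignore)
# tuples, rejecting repeats and empty specs, as asserted above:
#
#   args = _SelectorArguments.validate(Gaussian2D(), (('x', True), (1, False)))
#   tuple(args)                  # -> ((0, True), (1, False))
#   args.ignore                  # -> [0]
#   args.get_selector(3.0, 7.0)  # -> (3.0, 7.0)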
class TestCompoundBoundingBox:
def test_create(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox(bounding_boxes, model,
selector_args, create_selector, order='F')
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'F'
def test_copy(self):
bounding_box = CompoundBoundingBox.validate(Gaussian2D(),
{(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)},
((0, True),), mk.MagicMock())
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
# Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
assert bounding_box._create_selector == copy._create_selector
assert id(bounding_box._create_selector) != id(copy._create_selector)
# Check selector_args
for index, argument in enumerate(bounding_box.selector_args):
assert argument == copy.selector_args[index]
assert id(argument) != id(copy.selector_args[index])
# Same integer values will have the same id
assert argument.index == copy.selector_args[index].index
assert id(argument.index) == id(copy.selector_args[index].index)
# Same boolean values will have the same id
assert argument.ignore == copy.selector_args[index].ignore
assert id(argument.ignore) == id(copy.selector_args[index].ignore)
assert len(bounding_box.selector_args) == len(copy.selector_args)
# Check bounding_boxes
for selector, bbox in bounding_box.bounding_boxes.items():
assert bbox == copy.bounding_boxes[selector]
assert id(bbox) != id(copy.bounding_boxes[selector])
assert bbox.ignored == copy.bounding_boxes[selector].ignored
assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored)
# model is not copied to prevent infinite recursion
assert bbox._model == copy.bounding_boxes[selector]._model
assert id(bbox._model) == id(copy.bounding_boxes[selector]._model)
# Same string values will have the same id
assert bbox._order == copy.bounding_boxes[selector]._order
assert id(bbox._order) == id(copy.bounding_boxes[selector]._order)
# Check interval objects
for index, interval in bbox.intervals.items():
assert interval == copy.bounding_boxes[selector].intervals[index]
assert id(interval) != id(copy.bounding_boxes[selector].intervals[index])
# Same float values will have the same id
assert interval.lower == copy.bounding_boxes[selector].intervals[index].lower
assert id(interval.lower) == id(copy.bounding_boxes[selector].intervals[index].lower) # noqa: E501
# Same float values will have the same id
assert interval.upper == copy.bounding_boxes[selector].intervals[index].upper
assert id(interval.upper) == id(copy.bounding_boxes[selector].intervals[index].upper) # noqa: E501
assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals)
assert bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys()
assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes)
assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys()
def test___repr__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box.__repr__() == (
"CompoundBoundingBox(\n"
" bounding_boxes={\n"
" (1,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-1, upper=1)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" (2,) = ModelBoundingBox(\n"
" intervals={\n"
" y: Interval(lower=-2, upper=2)\n"
" }\n"
" ignored=['x']\n"
" model=Gaussian2D(inputs=('x', 'y'))\n"
" order='C'\n"
" )\n"
" }\n"
" selector_args = SelectorArguments(\n"
" Argument(name='x', ignore=True)\n"
" )\n"
")"
)
def test_bounding_boxes(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box.bounding_boxes == bounding_boxes
def test_selector_args(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_box = CompoundBoundingBox({}, model, selector_args)
# Get
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
# Set
selector_args = ((1, False),)
with pytest.warns(RuntimeWarning, match=r"Overriding selector_args.*"):
bounding_box.selector_args = selector_args
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
def test_create_selector(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector)
assert bounding_box._create_selector == create_selector
assert bounding_box.create_selector == create_selector
def test__get_selector_key(self):
bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),))
assert len(bounding_box.bounding_boxes) == 0
# Singular
assert bounding_box._get_selector_key(5) == (5,)
assert bounding_box._get_selector_key((5,)) == (5,)
assert bounding_box._get_selector_key([5]) == (5,)
assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,)
assert bounding_box._get_selector_key(np.array([5])) == (5,)
# Multiple
assert bounding_box._get_selector_key((5, 19)) == (5, 19)
assert bounding_box._get_selector_key([5, 19]) == (5, 19)
assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19)
def test___setitem__(self):
model = Gaussian2D()
# Ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, True),), order='F')
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15, )] = (-15, 15)
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == (-15, 15)
assert bounding_box._bounding_boxes[(15,)].order == 'F'
# Invalid key
assert (7, 13) not in bounding_box._bounding_boxes
with pytest.raises(ValueError) as err:
bounding_box[(7, 13)] = (-7, 7)
assert str(err.value) == "(7, 13) is not a selector!"
assert (7, 13) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(ValueError):
bounding_box[(13,)] = ((-13, 13), (-3, 3))
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# No ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, False),), order='F')
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15, )] = ((-15, 15), (-6, 6))
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6))
assert bounding_box._bounding_boxes[(15,)].order == 'F'
# Invalid key
assert (14, 11) not in bounding_box._bounding_boxes
with pytest.raises(ValueError) as err:
bounding_box[(14, 11)] = ((-7, 7), (-12, 12))
assert str(err.value) == "(14, 11) is not a selector!"
assert (14, 11) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
assert 13 not in bounding_box._bounding_boxes
with pytest.raises(ValueError):
bounding_box[(13,)] = (-13, 13)
assert 13 not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
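# Editorial sketch: assignment keys must be valid selector tuples and the
# value is validated into a ModelBoundingBox over the non-selector inputs
# (the selector input is ignored when its ignore flag is set), as asserted
# above:
#
#   cbb = CompoundBoundingBox({}, Gaussian2D(), ((0, True),))
#   cbb[(1,)] = (-1, 1)      # single interval: applies to 'y', 'x' ignored
#   cbb[(1,)]['y']           # -> Interval(lower=-1, upper=1)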
def test__validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
# Tuple selector_args
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox({}, model, selector_args)
bounding_box._validate(bounding_boxes)
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
def test___eq__(self):
bounding_box_1 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)},
Gaussian2D(), ((0, True),))
bounding_box_2 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)},
Gaussian2D(), ((0, True),))
# Equal
assert bounding_box_1 == bounding_box_2
# Not equal to non-compound bounding_box
assert not bounding_box_1 == mk.MagicMock()
assert not bounding_box_2 == mk.MagicMock()
# Not equal bounding_boxes
bounding_box_2[(15,)] = (-15, 15)
assert not bounding_box_1 == bounding_box_2
del bounding_box_2._bounding_boxes[(15,)]
assert bounding_box_1 == bounding_box_2
# Not equal selector_args
bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, False),))
assert not bounding_box_1 == bounding_box_2
bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, True),))
assert bounding_box_1 == bounding_box_2
# Not equal create_selector
bounding_box_2._create_selector = mk.MagicMock()
assert not bounding_box_1 == bounding_box_2
def test_validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
# Fail selector_args
with pytest.raises(ValueError) as err:
CompoundBoundingBox.validate(model, bounding_boxes)
assert str(err.value) == ("Selector arguments must be provided "
"(can be passed as part of bounding_box argument)")
# Normal validate
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,
create_selector, order='F')
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'F'
# Re-validate
new_bounding_box = CompoundBoundingBox.validate(model, bounding_box)
assert bounding_box == new_bounding_box
assert new_bounding_box._order == 'F'
# Default order
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,
create_selector)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'C'
def test___contains__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (1,) in bounding_box
assert (2,) in bounding_box
assert (3,) not in bounding_box
assert 1 not in bounding_box
assert 2 not in bounding_box
def test__create_bounding_box(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1, False),),
create_selector)
# Create is successful
create_selector.return_value = ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 0
bbox = bounding_box._create_bounding_box((7,))
assert isinstance(bbox, ModelBoundingBox)
assert bbox == ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 1
assert (7,) in bounding_box
assert isinstance(bounding_box[(7,)], ModelBoundingBox)
assert bounding_box[(7,)] == bbox
# Create is unsuccessful
create_selector.return_value = (-42, 42)
with pytest.raises(ValueError):
bounding_box._create_bounding_box((27,))
def test___getitem__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
# already exists
assert isinstance(bounding_box[1], ModelBoundingBox)
assert bounding_box[1] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[2] == (-2, 2)
assert isinstance(bounding_box[(1,)], ModelBoundingBox)
assert bounding_box[(1,)] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[(2,)] == (-2, 2)
# no selector
with pytest.raises(RuntimeError) as err:
bounding_box[(3,)]
assert str(err.value) == "No bounding box is defined for selector: (3,)."
# Create a selector
bounding_box._create_selector = mk.MagicMock()
with mk.patch.object(CompoundBoundingBox, '_create_bounding_box',
autospec=True) as mkCreate:
assert bounding_box[(3,)] == mkCreate.return_value
assert mkCreate.call_args_list == [mk.call(bounding_box, (3,))]
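# Editorial sketch: lookup normalizes scalar keys to tuples; a missing
# selector raises RuntimeError unless a create_selector callback was given,
# in which case a new ModelBoundingBox is built from the callback's return
# value and cached. make_bbox below is hypothetical, and the callback's
# exact call signature is an assumption, not confirmed by these tests:
#
#   def make_bbox(*args, **kwargs):
#       return (-42, 42)      # must be a valid bounding box for the model
#   cbb = CompoundBoundingBox({(1,): (-1, 1)}, Gaussian2D(), ((0, True),),
#                             make_bbox)
#   cbb[(3,)]                 # created on demand via make_bbox, then cached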
def test__select_bounding_box(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
inputs = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(_SelectorArguments, 'get_selector',
autospec=True) as mkSelector:
with mk.patch.object(CompoundBoundingBox, '__getitem__',
autospec=True) as mkGet:
assert bounding_box._select_bounding_box(inputs) == mkGet.return_value
assert mkGet.call_args_list == [mk.call(bounding_box, mkSelector.return_value)]
assert mkSelector.call_args_list == [mk.call(bounding_box.selector_args, *inputs)]
def test_prepare_inputs(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
input_shape = mk.MagicMock()
with mk.patch.object(ModelBoundingBox, 'prepare_inputs',
autospec=True) as mkPrepare:
assert bounding_box.prepare_inputs(input_shape, [1, 2, 3]) == mkPrepare.return_value
assert mkPrepare.call_args_list == [mk.call(bounding_box[(1,)], input_shape, [1, 2, 3])]
mkPrepare.reset_mock()
assert bounding_box.prepare_inputs(input_shape, [2, 2, 3]) == mkPrepare.return_value
assert mkPrepare.call_args_list == [mk.call(bounding_box[(2,)], input_shape, [2, 2, 3])]
mkPrepare.reset_mock()
def test__matching_bounding_boxes(self):
# Single selector index
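        # Fixing the only selector argument leaves an empty selector key ()
        # and marks the fixed input as ignored in the resulting box.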
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4))
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes('x', value)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4))
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes('x', value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
matching = bounding_box._matching_bounding_boxes('y', value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'y' in bbox
assert 'y' in bbox.ignored_inputs
assert 'x' in bbox
assert bbox['x'] == (-(5 - value), (5 - value))
assert len(bbox.intervals) == 1
assert bbox.ignored == [1]
        # Realistic case: fixing the slit selector input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5))
}
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,
selector_args=[('slit_id', True)], order='F')
matching = bounding_box._matching_bounding_boxes('slit_id', 0)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
matching = bounding_box._matching_bounding_boxes('slit_id', 1)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 3047.5),
'y': (-0.5, 4047.5)}
assert bbox.order == 'F'
# Errors
with pytest.raises(ValueError) as err:
bounding_box._matching_bounding_boxes('slit_id', 2)
assert str(err.value) == ("Attempting to fix input slit_id, but "
"there are no bounding boxes for argument value 2.")
def test__fix_input_selector_arg(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {
(1,): ((-1, 1), (-2, 2)),
(2,): ((-2, 2), (-3, 3)),
(3,): ((-3, 3), (-4, 4))
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg('x', value)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {
(1, 3): ((-1, 1), (-2, 2)),
(2, 2): ((-2, 2), (-3, 3)),
(3, 1): ((-3, 3), (-4, 4))
}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg('x', value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((1, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox_selector
assert 'x' in bbox_selector.ignored_inputs
assert 'y' in bbox_selector
assert bbox_selector['y'] == (-value, value)
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [0]
bbox = bounding_box._fix_input_selector_arg('y', value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((0, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert 'y' in bbox_selector
assert 'y' in bbox_selector.ignored_inputs
assert 'x' in bbox_selector
assert bbox_selector['x'] == (-(5 - value), (5 - value))
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [1]
        # Realistic case: fixing the slit selector input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5))
}
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,
selector_args=[('slit_id', True)], order='F')
bbox = bounding_box._fix_input_selector_arg('slit_id', 0)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
bbox = bounding_box._fix_input_selector_arg('slit_id', 1)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 3047.5),
'y': (-0.5, 4047.5)}
assert bbox.order == 'F'
def test__fix_input_bbox_arg(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5))
}
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,
selector_args=[('slit_id', True)], order='F')
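        # Fixing a non-selector input drops its interval from every sub-box
        # and records the fixed input's index in the selector's _kept_ignore.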
bbox = bounding_box._fix_input_bbox_arg('x', 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [0]
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert len(bbox._bounding_boxes) == 2
bbox = bounding_box._fix_input_bbox_arg('y', 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [1]
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert len(bbox._bounding_boxes) == 2
def test_fix_inputs(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {
(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5))
}
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,
selector_args=[('slit_id', True)], order='F')
model.bounding_box = bounding_box
# Fix selector argument
new_model = fix_inputs(model, {'slit_id': 0})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
# Fix a bounding_box field
new_model = fix_inputs(model, {'x': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
new_model = fix_inputs(model, {'y': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
# Fix selector argument and a bounding_box field
new_model = fix_inputs(model, {'slit_id': 0, 'x': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
new_model = fix_inputs(model, {'y': 5, 'slit_id': 1})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'x': (-0.5, 3047.5)}
assert bbox.order == 'F'
# Fix two bounding_box fields
new_model = fix_inputs(model, {'x': 5, 'y': 7})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert bbox.selector_args == ((0, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
def test_complex_compound_bounding_box(self):
model = Identity(4)
bounding_boxes = {
(2.5, 1.3): ((-1, 1), (-3, 3)),
(2.5, 2.71): ((-3, 3), (-1, 1))
}
selector_args = (('x0', True), ('x1', True))
bbox = CompoundBoundingBox.validate(model, bounding_boxes, selector_args)
assert bbox[(2.5, 1.3)] == ModelBoundingBox(((-1, 1), (-3, 3)),
model, ignored=['x0', 'x1'])
assert bbox[(2.5, 2.71)] == ModelBoundingBox(((-3, 3), (-1, 1)),
model, ignored=['x0', 'x1'])
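    def test_fix_inputs_roundtrip_sketch(self):
        # A minimal end-to-end sketch, mirroring test_fix_inputs above:
        # attach a compound bounding box keyed on 'slit_id', fix that
        # selector input, and check the surviving intervals.
        model = Shift(1) & Scale(2) & Identity(1)
        model.inputs = ('x', 'y', 'slit_id')
        model.bounding_box = CompoundBoundingBox.validate(
            model, {(0,): ((-1, 1), (-2, 2)), (1,): ((-3, 3), (-4, 4))},
            selector_args=[('slit_id', True)], order='F')
        fixed = fix_inputs(model, {'slit_id': 1})
        assert fixed.bounding_box.named_intervals == {'x': (-3, 3),
                                                      'y': (-4, 4)}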
|
4f8394826c8e983c42d3316897437e550af0ebc690b750790a965112f19214d9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, pointless-statement
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.modeling.core import CompoundModel, Model, ModelDefinitionError
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import (
Chebyshev1D, Chebyshev2D, Const1D, Gaussian1D, Gaussian2D, Identity, Legendre1D, Legendre2D,
Linear1D, Mapping, Polynomial1D, Polynomial2D, Rotation2D, Scale, Shift, Tabular1D, fix_inputs)
from astropy.modeling.parameters import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set(expr, result):
s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2))
out = s(0, model_set_axis=False)
assert_array_equal(out, result)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set_raises_value_error(expr, result):
"""Check that creating model sets with components whose _n_models are
different raise a value error
"""
with pytest.raises(ValueError):
expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1))
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_instance_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters.
"""
s = expr(Const1D(2), Const1D(3))
assert isinstance(s, CompoundModel)
assert s.n_inputs == 1
assert s.n_outputs == 1
out = s(0)
assert out == result
assert isinstance(out, float)
def test_simple_two_model_compose_1d():
"""
Shift and Scale are two of the simplest models to test model composition
with.
"""
S1 = Shift(2) | Scale(3) # First shift then scale
assert isinstance(S1, CompoundModel)
assert S1.n_inputs == 1
assert S1.n_outputs == 1
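    # Shift then scale: (1 + 2) * 3 == 9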
assert S1(1) == 9.0
S2 = Scale(2) | Shift(3) # First scale then shift
assert isinstance(S2, CompoundModel)
assert S2.n_inputs == 1
assert S2.n_outputs == 1
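    # Scale then shift: (1 * 2) + 3 == 5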
assert S2(1) == 5.0
# Test with array inputs
assert_array_equal(S2([1, 2, 3]), [5.0, 7.0, 9.0])
def test_simple_two_model_compose_2d():
"""
A simple example consisting of two rotations.
"""
r1 = Rotation2D(45) | Rotation2D(45)
assert isinstance(r1, CompoundModel)
assert r1.n_inputs == 2
assert r1.n_outputs == 2
assert_allclose(r1(0, 1), (-1, 0), atol=1e-10)
r2 = Rotation2D(90) | Rotation2D(90) # Rotate twice by 90 degrees
assert_allclose(r2(0, 1), (0, -1), atol=1e-10)
# Compose R with itself to produce 4 rotations
r3 = r1 | r1
assert_allclose(r3(0, 1), (0, -1), atol=1e-10)
def test_n_submodels():
"""
Test that CompoundModel.n_submodels properly returns the number
of components.
"""
g2 = Gaussian1D() + Gaussian1D()
assert g2.n_submodels == 2
g3 = g2 + Gaussian1D()
assert g3.n_submodels == 3
g5 = g3 | g2
assert g5.n_submodels == 5
g7 = g5 / g2
assert g7.n_submodels == 7
def test_expression_formatting():
"""
Test that the expression strings from compound models are formatted
correctly.
"""
    # For the purposes of this test it doesn't matter a great deal which
    # model(s) are used in the expression.
G = Gaussian1D(1, 1, 1)
G2 = Gaussian2D(1, 2, 3, 4, 5, 6)
M = G + G
assert M._format_expression() == '[0] + [1]'
M = G + G + G
assert M._format_expression() == '[0] + [1] + [2]'
M = G + G * G
assert M._format_expression() == '[0] + [1] * [2]'
M = G * G + G
assert M._format_expression() == '[0] * [1] + [2]'
M = G + G * G + G
assert M._format_expression() == '[0] + [1] * [2] + [3]'
M = (G + G) * (G + G)
assert M._format_expression() == '([0] + [1]) * ([2] + [3])'
# This example uses parentheses in the expression, but those won't be
# preserved in the expression formatting since they technically aren't
# necessary, and there's no way to know that they were originally
# parenthesized (short of some deep, and probably not worthwhile
# introspection)
M = (G * G) + (G * G)
assert M._format_expression() == '[0] * [1] + [2] * [3]'
M = G ** G
assert M._format_expression() == '[0] ** [1]'
M = G + G ** G
assert M._format_expression() == '[0] + [1] ** [2]'
M = (G + G) ** G
assert M._format_expression() == '([0] + [1]) ** [2]'
M = G + G | G
assert M._format_expression() == '[0] + [1] | [2]'
M = G + (G | G)
assert M._format_expression() == '[0] + ([1] | [2])'
M = G & G | G2
assert M._format_expression() == '[0] & [1] | [2]'
M = G & (G | G)
assert M._format_expression() == '[0] & ([1] | [2])'
def test_basic_compound_inverse():
"""
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only.
"""
t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
assert_allclose(t.inverse(*t(0, 1)), (0, 1))
@pytest.mark.parametrize('model', [
Shift(0) + Shift(0) | Shift(0),
Shift(0) - Shift(0) | Shift(0),
Shift(0) * Shift(0) | Shift(0),
Shift(0) / Shift(0) | Shift(0),
Shift(0) ** Shift(0) | Shift(0),
Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6)])
def test_compound_unsupported_inverse(model):
"""
Ensure inverses aren't supported in cases where it shouldn't be.
"""
with pytest.raises(NotImplementedError):
model.inverse
def test_mapping_basic_permutations():
"""
    Tests a couple of basic examples of the Mapping model--specifically examples
that merely permute the outputs.
"""
x, y = Rotation2D(90)(1, 2)
rs = Rotation2D(90) | Mapping((1, 0))
x_prime, y_prime = rs(1, 2)
assert_allclose((x, y), (y_prime, x_prime))
# A more complicated permutation
m = Rotation2D(90) & Scale(2)
x, y, z = m(1, 2, 3)
ms = m | Mapping((2, 0, 1))
x_prime, y_prime, z_prime = ms(1, 2, 3)
assert_allclose((x, y, z), (y_prime, z_prime, x_prime))
def test_mapping_inverse():
"""Tests inverting a compound model that includes a `Mapping`."""
rs1 = Rotation2D(12.1) & Scale(13.2)
rs2 = Rotation2D(14.3) & Scale(15.4)
    # Rotates 2 of the coordinates and scales the third--then rotates on a
    # different axis and scales on the axis of rotation. No physical meaning
    # here; just a simple test.
m = rs1 | Mapping([2, 0, 1]) | rs2
assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08)
def test_identity_input():
"""
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362
"""
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=90)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), [-3.0, 1.0])
def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
with pytest.raises(ModelDefinitionError):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
with pytest.raises(ModelDefinitionError):
Rotation2D(90) + Gaussian1D(1, 0, 0.1)
@pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2)])
def test_compound_with_polynomials_2d(poly):
"""
Tests that polynomials are scaled when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x, y = np.mgrid[:20, :37]
result_compound = model(x, y)
result = shift(poly(x, y))
assert_allclose(result, result_compound)
def test_fix_inputs():
g1 = Gaussian2D(1, 0, 0, 1, 2)
g2 = Gaussian2D(1.5, .5, -.2, .5, .3)
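    # fix_inputs substitutes a constant for the named or indexed input,
    # producing a model with one fewer input.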
sg1_1 = fix_inputs(g1, {1: 0})
assert_allclose(sg1_1(0), g1(0, 0))
assert_allclose(sg1_1([0, 1, 3]), g1([0, 1, 3], [0, 0, 0]))
sg1_2 = fix_inputs(g1, {'x': 1})
assert_allclose(sg1_2(1.5), g1(1, 1.5))
gg1 = g1 & g2
sgg1_1 = fix_inputs(gg1, {1: 0.1, 3: 0.2})
assert_allclose(sgg1_1(0, 0), gg1(0, 0.1, 0, 0.2))
sgg1_2 = fix_inputs(gg1, {'x0': -.1, 2: .1})
assert_allclose(sgg1_2(1, 1), gg1(-0.1, 1, 0.1, 1))
assert_allclose(sgg1_2(y0=1, y1=1), gg1(-0.1, 1, 0.1, 1))
def test_fix_inputs_invalid():
g1 = Gaussian2D(1, 0, 0, 1, 2)
with pytest.raises(ValueError):
fix_inputs(g1, {'x0': 0, 0: 0})
with pytest.raises(ValueError):
fix_inputs(g1, (0, 1))
with pytest.raises(ValueError):
fix_inputs(g1, {3: 2})
with pytest.raises(ValueError):
fix_inputs(g1, {np.int32(3): 2})
with pytest.raises(ValueError):
fix_inputs(g1, {np.int64(3): 2})
with pytest.raises(ValueError):
fix_inputs(g1, {'w': 2})
with pytest.raises(ModelDefinitionError):
CompoundModel('#', g1, g1)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {0: 1})
gg1(2, y=2)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {np.int32(0): 1})
gg1(2, y=2)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {np.int64(0): 1})
gg1(2, y=2)
def test_fix_inputs_with_bounding_box():
g1 = Gaussian2D(1, 0, 0, 1, 1)
g2 = Gaussian2D(1, 0, 0, 1, 1)
assert g1.bounding_box == ((-5.5, 5.5), (-5.5, 5.5))
gg1 = g1 & g2
gg1.bounding_box = ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2))
assert gg1.bounding_box == ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2))
sg = fix_inputs(gg1, {0: 0, 2: 0})
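    # Only the intervals of the remaining free inputs (1 and 3) survive.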
assert sg.bounding_box == ((-5.5, 5.5), (-5.3, 5.3))
g1 = Gaussian1D(10, 3, 1)
g = g1 & g1
g.bounding_box = ((1, 4), (6, 8))
gf = fix_inputs(g, {0: 1})
assert gf.bounding_box == (1, 4)
def test_indexing_on_instance():
"""Test indexing on compound model instances."""
m = Gaussian1D(1, 0, 0.1) + Const1D(2)
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Const1D)
assert m.param_names == ('amplitude_0', 'mean_0', 'stddev_0', 'amplitude_1')
# Test parameter equivalence
assert m[0].amplitude == 1 == m.amplitude_0
assert m[0].mean == 0 == m.mean_0
assert m[0].stddev == 0.1 == m.stddev_0
assert m[1].amplitude == 2 == m.amplitude_1
# Test that parameter value updates are symmetric between the compound
# model and the submodel returned by indexing
const = m[1]
m.amplitude_1 = 42
assert const.amplitude == 42
const.amplitude = 137
assert m.amplitude_1 == 137
# Similar couple of tests, but now where the compound model was created
# from model instances
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
m = g + p
assert m[0].name == 'g'
assert m[1].name == 'p'
assert m['g'].name == 'g'
assert m['p'].name == 'p'
poly = m[1]
m.c0_1 = 12345
assert poly.c0 == 12345
poly.c1 = 6789
assert m.c1_1 == 6789
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
# Confirm index-by-name works with fix_inputs
g = Gaussian2D(1, 2, 3, 4, 5, name='g')
m = fix_inputs(g, {0: 1})
assert m['g'].name == 'g'
# Test string slicing
A = Const1D(1.1, name='A')
B = Const1D(2.1, name='B')
C = Const1D(3.1, name='C')
M = A + B * C
assert_allclose(M['B':'C'](1), 6.510000000000001)
class _ConstraintsTestA(Model):
stddev = Parameter(default=0, min=0, max=0.3)
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(stddev, mean):
return stddev, mean
class _ConstraintsTestB(Model):
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(mean):
return mean
def test_inherit_constraints():
"""
Various tests for copying of constraint values between compound models and
their members.
Regression test for https://github.com/astropy/astropy/issues/3481
"""
model = (Gaussian1D(bounds={'stddev': (0, 0.3)}, fixed={'mean': True}) +
Gaussian1D(fixed={'mean': True}))
# Lots of assertions in this test as there are multiple interfaces to
# parameter constraints
assert 'stddev_0' in model.bounds
assert model.bounds['stddev_0'] == (0, 0.3)
assert model.stddev_0.bounds == (0, 0.3)
assert 'mean_0' in model.fixed
assert model.fixed['mean_0'] is True
assert model.mean_0.fixed is True
assert 'mean_1' in model.fixed
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
assert model.stddev_0 is model[0].stddev
# Great, all the constraints were inherited properly
# Now what about if we update them through the sub-models?
model.stddev_0.bounds = (0, 0.4)
assert model[0].stddev.bounds == (0, 0.4)
assert model[0].bounds['stddev'] == (0, 0.4)
model.stddev_0.bounds = (0.1, 0.5)
assert model[0].stddev.bounds == (0.1, 0.5)
assert model[0].bounds['stddev'] == (0.1, 0.5)
model[1].mean.fixed = False
assert model.mean_1.fixed is False
assert model[1].mean.fixed is False
# Now turn off syncing of constraints
assert model.bounds['stddev_0'] == (0.1, 0.5)
model.sync_constraints = False
model[0].stddev.bounds = (0, 0.2)
assert model.bounds['stddev_0'] == (0.1, 0.5)
model.sync_constraints = True
assert model.bounds['stddev_0'] == (0, 0.2)
def test_compound_custom_inverse():
"""
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542
"""
poly = Polynomial1D(1, c0=1, c1=2)
scale = Scale(1)
shift = Shift(1)
model1 = poly | scale
model1.inverse = poly
# model1 now has a custom inverse (the polynomial itself, ignoring the
# trivial scale factor)
model2 = shift | model1
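    # Inverting a composition applies the component inverses right-to-left,
    # so model2.inverse is model1.inverse | shift.inverse.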
assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
# Make sure an inverse is not allowed if the models were combined with the
# wrong operator, or if one of the models doesn't have an inverse defined
with pytest.raises(NotImplementedError):
(shift + model1).inverse
with pytest.raises(NotImplementedError):
(model1 & poly).inverse
def test_pickle_compound():
"""
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
"""
# Test pickling a compound model instance
g1 = Gaussian1D(1.0, 0.0, 0.1)
g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
m = g1 + g2
m2 = pickle.loads(pickle.dumps(m))
assert m.param_names == m2.param_names
assert m.__class__.__name__ == m2.__class__.__name__
assert np.all(m.parameters == m2.parameters)
assert np.all(m(0) == m2(0))
def test_update_parameters():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
assert m(1) == 4
offx.offset = 42
assert m(1) == 86
m.factor_1 = 100
assert m(1) == 4300
m2 = m | offx
assert m2(1) == 4342
def test_name():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
scl.name = "scale"
assert m.submodel_names == ('None_0', 'scale')
assert m.name is None
m.name = "M"
assert m.name == "M"
m1 = m.rename("M1")
assert m.name == "M1"
assert m1.name == "M1"
def test_name_index():
g1 = Gaussian1D(1, 1, 1)
g2 = Gaussian1D(1, 2, 1)
g = g1 + g2
with pytest.raises(IndexError):
g['bozo']
g1.name = 'bozo'
assert g['bozo'].mean == 1
g2.name = 'bozo'
with pytest.raises(IndexError):
g['bozo']
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_in_compound():
"""
Issue #7411 - evaluate should not change the shape of the output.
"""
t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19],
bounds_error=False)
rot = Rotation2D(2)
p = Polynomial1D(1)
x = np.arange(12).reshape((3, 4))
    # Create a compound model which does not execute Tabular.__call__,
    # but Tabular.evaluate, and is followed by a Rotation2D which
    # checks the exact shapes.
model = p & t | rot
x1, y1 = model(x, x)
assert x1.ndim == 2
assert y1.ndim == 2
def test_bounding_box():
g = Gaussian2D() + Gaussian2D(2, .5, .1, 2, 3, 0)
g.bounding_box = ((0, 1), (0, .5))
y, x = np.mgrid[0:10, 0:10]
y = y / 3.
x = x / 3.
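    # Points outside the bounding box are filled with NaN by default.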
val = g(x, y, with_bounding_box=True)
compare = np.array([
[2.93738984, 2.93792011, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.87857153, 2.88188761, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.70492922, 2.71529265, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.45969972, 2.47912103, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]])
mask = ~np.isnan(val)
assert_allclose(val[mask], compare[mask])
val2 = g(x+2, y+2, with_bounding_box=True)
assert np.isnan(val2).sum() == 100
@pytest.mark.skipif("not HAS_SCIPY")
def test_bounding_box_with_units():
points = np.arange(5) * u.pix
lt = np.arange(5) * u.AA
t = Tabular1D(points, lt)
assert t(1 * u.pix, with_bounding_box=True) == 1. * u.AA
@pytest.mark.parametrize('poly', [Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)])
def test_compound_with_polynomials_1d(poly):
"""
Tests that polynomials are offset when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x = np.linspace(-5, 5, 10)
result_compound = model(x)
result = shift(poly(x))
assert_allclose(result, result_compound)
assert model.param_names == ('c0_0', 'c1_0', 'c2_0', 'c3_0', 'c4_0', 'c5_0', 'offset_1')
def test_replace_submodel():
"""
Replace a model in a Compound model
"""
S1 = Shift(2, name='shift2') | Scale(3, name='scale3') # First shift then scale
S2 = Scale(2, name='scale2') | Shift(3, name='shift3') # First scale then shift
m = S1 & S2
assert m(1, 2) == (9, 7)
m2 = m.replace_submodel('scale3', Scale(4, name='scale4'))
assert m2(1, 2) == (12, 7)
assert m(1, 2) == (9, 7)
# Check the inverse has been updated
assert m2.inverse(12, 7) == (1, 2)
# Produce the same result by replacing a single model with a compound
m3 = m.replace_submodel('shift2', Shift(2) | Scale(2))
assert m(1, 2) == (9, 7)
assert m3(1, 2) == (18, 7)
# Check the inverse has been updated
assert m3.inverse(18, 7) == (1, 2)
    # Test with an arithmetic model compounding operator
m = S1 + S2
assert m(1) == 14
m2 = m.replace_submodel('scale2', Scale(4, name='scale4'))
assert m2(1) == 16
# Test with fix_inputs()
R = fix_inputs(Rotation2D(angle=90, name='rotate'), {0: 1})
m4 = S1 | R
assert_allclose(m4(0), (-6, 1))
m5 = m4.replace_submodel('rotate', Rotation2D(180))
assert_allclose(m5(0), (-1, -6))
# Check we get a value error when model name doesn't exist
with pytest.raises(ValueError):
m2 = m.replace_submodel('not_there', Scale(2))
# And now a model set
P = Polynomial1D(degree=1, n_models=2, name='poly')
S = Shift([1, 2], n_models=2)
m = P | S
assert_array_equal(m([0, 1]), (1, 2))
with pytest.raises(ValueError):
m2 = m.replace_submodel('poly', Polynomial1D(degree=1, c0=1))
m2 = m.replace_submodel('poly', Polynomial1D(degree=1, c0=[1, 2],
n_models=2))
assert_array_equal(m2([0, 1]), (2, 4))
# Ensure previous _user_inverse doesn't stick around
S1 = Shift(1)
S2 = Shift(2)
S3 = Shift(3, name='S3')
S23 = S2 | S3
S23.inverse = Shift(-4.9)
m = S1 & S23
# This should delete the S23._user_inverse
m2 = m.replace_submodel('S3', Shift(4))
assert m2(1, 2) == (2, 8)
assert m2.inverse(2, 8) == (1, 2)
@pytest.mark.parametrize(
"expr",
[
lambda m1, m2: m1 + m2,
lambda m1, m2: m1 - m2,
lambda m1, m2: m1 * m2,
lambda m1, m2: m1 / m2,
],
)
def test_compound_evaluate(expr):
"""
    Tests that the compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
    # Some evaluate functions, including Const1D's, assume that inputs are numpy arrays or quantities
p1 = np.array([1, 2, 3, 4, 1, 2])
p2 = np.array([1, 0, 0.5])
model1 = Polynomial1D(5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
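    # CompoundModel.evaluate takes the inputs followed by the parameters of
    # every submodel, flattened in submodel order.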
assert_array_equal(
compound.evaluate(x, *p1, *p2),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)
def test_compound_evaluate_power():
"""
    Tests that the compound evaluate function produces the same
result as the models with the power operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1 ** model2
assert_array_equal(
compound.evaluate(x, *p1, *p2),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)
def test_compound_evaluate_double_shift():
x = np.linspace(-5, 5, 10)
y = np.linspace(-5, 5, 10)
m1 = Gaussian2D(1, 0, 0, 1, 1, 1)
m2 = Shift(1)
m3 = Shift(2)
m = Gaussian2D(1, 0, 0, 1, 1, 1) & Shift(1) & Shift(2)
assert_array_equal(
m.evaluate(x, y, x - 10, y + 20, 1, 0, 0, 1, 1, 1, 1, 2),
[
m1.evaluate(x, y, 1, 0, 0, 1, 1, 1),
m2.evaluate(x - 10, 1),
m3.evaluate(y + 20, 2),
],
)
@pytest.mark.parametrize(
"expr",
[
lambda m1, m2: m1 + m2,
lambda m1, m2: m1 - m2,
lambda m1, m2: m1 * m2,
lambda m1, m2: m1 / m2,
],
)
def test_compound_evaluate_named_param(expr):
"""
    Tests that the compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3, 0.5, 0.5])
model1 = Gaussian1D(2, 1, 5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(
x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]
),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)
def test_compound_evaluate_name_param_power():
"""
    Tests that the compound evaluate function produces the same
result as the models with the power operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1 ** model2
assert_array_equal(
compound.evaluate(
x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]
),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)
def test_compound_evaluate_and():
"""
    Tests that the compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0.1, 0.5])
p2 = np.array([3])
model1 = Gaussian1D()
model2 = Shift()
compound = model1 & model2
assert_array_equal(
compound.evaluate(x, x, *p1, p2),
[model1.evaluate(x, *p1), model2.evaluate(x, p2)],
)
def test_compound_evaluate_or():
"""
    Tests that the compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([0.5])
p2_amplitude = np.array([3])
p2_mean = np.array([0])
p2_std = np.array([0.1])
model1 = Shift(0.5)
model2 = Gaussian1D(1, 0, 0.5)
compound = model1 | model2
assert_array_equal(
compound.evaluate(x, p1, p2_amplitude, p2_mean, p2_std),
model2.evaluate(model1.evaluate(x, p1), p2_amplitude, p2_mean, p2_std),
)
def test_compound_evaluate_fix_inputs_by_keyword():
"""
    Tests that the compound evaluate function produces the same
    result as the model with the fix_inputs operator applied,
    fixing the input by keyword
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {"x": x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)
def test_compound_evaluate_fix_inputs_by_position():
"""
    Tests that the compound evaluate function produces the same
    result as the model with the fix_inputs operator applied,
    fixing the input by position index
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {0: x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_multiplied_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
truth = m1 * m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_multiplied_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
truth = m1 * m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
m4 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m11 = m1 * m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_divided_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
truth = m1 / m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_mixed_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
truth = m1 / m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
m4 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m11 = m1 / m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
|
c2544b00a27825d63b74eaf51f77169909f5ede3930c87572ed96aa4fb71dba7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import numpy as np
import pytest
from astropy.convolution import convolve_models_fft
from astropy.modeling.models import Const1D, Const2D
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
@pytest.mark.skipif('not HAS_SCIPY')
def test_clear_cache():
m1 = Const1D()
m2 = Const1D()
model = convolve_models_fft(m1, m2, (-1, 1), 0.01)
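    # The convolution and its FFT kwargs are computed lazily on the first
    # call and cached until clear_cache() is invoked.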
assert model._kwargs is None
assert model._convolution is None
results = model(0)
    assert np.allclose(results, np.array([1.]))
assert model._kwargs is not None
assert model._convolution is not None
model.clear_cache()
assert model._kwargs is None
assert model._convolution is None
@pytest.mark.skipif('not HAS_SCIPY')
def test_input_shape_1d():
m1 = Const1D()
m2 = Const1D()
model = convolve_models_fft(m1, m2, (-1, 1), 0.01)
results = model(0)
assert results.shape == (1,)
x = np.arange(-1, 1, 0.1)
results = model(x)
assert results.shape == x.shape
@pytest.mark.skipif('not HAS_SCIPY')
def test_input_shape_2d():
m1 = Const2D()
m2 = Const2D()
model = convolve_models_fft(m1, m2, ((-1, 1), (-1, 1)), 0.01)
results = model(0, 0)
assert results.shape == (1,)
x = np.arange(-1, 1, 0.1)
results = model(x, 0)
assert results.shape == x.shape
results = model(0, x)
assert results.shape == x.shape
grid = np.meshgrid(x, x)
results = model(*grid)
assert results.shape == grid[0].shape
assert results.shape == grid[1].shape
@pytest.mark.skipif('not HAS_SCIPY')
def test__convolution_inputs():
m1 = Const2D()
m2 = Const2D()
model = convolve_models_fft(m1, m2, ((-1, 1), (-1, 1)), 0.01)
x = np.arange(-1, 1, 0.1)
y = np.arange(-2, 2, 0.1)
grid0 = np.meshgrid(x, x)
grid1 = np.meshgrid(y, y)
# scalar inputs
assert (np.array([1]), (1,)) == model._convolution_inputs(1)
# Multiple inputs
assert np.all(model._convolution_inputs(*grid0)[0] ==
np.reshape([grid0[0], grid0[1]], (2, -1)).T)
assert model._convolution_inputs(*grid0)[1] == grid0[0].shape
assert np.all(model._convolution_inputs(*grid1)[0] ==
np.reshape([grid1[0], grid1[1]], (2, -1)).T)
assert model._convolution_inputs(*grid1)[1] == grid1[0].shape
# Error
with pytest.raises(ValueError) as err:
model._convolution_inputs(grid0[0], grid1[1])
assert str(err.value) == "Values have differing shapes"
|
ea25ef284aa012d58bc2899efd1036502b5641e6f481184f21aea4a3195416be | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import os
import subprocess
import sys
import unittest.mock as mk
from inspect import signature
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy
import astropy.modeling.core as core
import astropy.units as u
from astropy.convolution import convolve_models
from astropy.modeling import models
from astropy.modeling.bounding_box import CompoundBoundingBox, ModelBoundingBox
from astropy.modeling.core import (
SPECIAL_OPERATORS, CompoundModel, Model, _add_special_operator, bind_bounding_box,
bind_compound_bounding_box, custom_model, fix_inputs)
from astropy.modeling.parameters import Parameter
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (str(m) ==
"Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5")
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
n_outputs = 1
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0))
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel():
with pytest.raises(TypeError):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ['self', 'args', 'meta', 'name', 'kwargs']
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ('a', 'b')
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ('a', 'b')
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ('a',)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'kwargs']
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
def test_custom_model_n_outputs():
"""
Test creating a custom_model which has more than one output, which
requires special handling.
    Demonstrates that issue #11791's ``n_outputs`` error has been solved
"""
@custom_model
def model(x, y, n_outputs=2):
return x+1, y+1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 2
assert m.outputs == ('x0', 'x1')
assert (separability_matrix(m) == [[True, True],
[True, True]]).all()
@custom_model
def model(x, y, z, n_outputs=3):
return x+1, y+1, z+1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 3
assert m.outputs == ('x0', 'x1', 'x2')
assert (separability_matrix(m) == [[True, True, True],
[True, True, True],
[True, True, True]]).all()
def test_custom_model_settable_parameters():
"""
Test creating a custom_model which specifically sets adjustable model
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, settable parameters
should be allowed to have defaults set.
"""
@custom_model
def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):
return x+1, y+1
m = model()
assert m.n_outputs == 2
assert m.bounding_box == ((1, 2), (3, 4))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
m = model(bounding_box=((5, 6), (7, 8)))
assert m.n_outputs == 2
assert m.bounding_box == ((5, 6), (7, 8))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
@custom_model
def model(x, y, n_outputs=2, outputs=('z0', 'z1')):
return x+1, y+1
m = model()
assert m.n_outputs == 2
assert m.outputs == ('z0', 'z1')
m.outputs = ('a0', 'a1')
assert m.outputs == ('a0', 'a1')
m = model(outputs=('w0', 'w1'))
assert m.n_outputs == 2
assert m.outputs == ('w0', 'w1')
m.outputs = ('a0', 'a1')
assert m.outputs == ('a0', 'a1')
def test_custom_model_rejected_parameters():
    """
    Test creating a custom_model which attempts to override non-overridable
    parameters.
    Demonstrates part of issue #11791's notes about what passed parameters
    should/shouldn't be allowed. In this case, non-settable parameters
    should raise an error (otherwise unexpected behavior may occur).
    """
with pytest.raises(ValueError,
match=r"Parameter 'n_inputs' cannot be a model property: *"):
@custom_model
def model1(x, y, n_outputs=2, n_inputs=3):
return x+1, y+1
with pytest.raises(ValueError,
match=r"Parameter 'uses_quantity' cannot be a model property: *"):
@custom_model
def model2(x, y, n_outputs=2, uses_quantity=True):
return x+1, y+1
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))
with pytest.raises(NotImplementedError):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
n_inputs = 0
outputs = ('y',)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
if (im is None) & (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
    # test that an error is raised when the bounding box is larger than the
    # input array
    with pytest.raises(ValueError):
        model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, .2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
if (im is None) & (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings('ignore:invalid value encountered in less')
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13., y0=10., z0=8., a=4., b=3., c=2., amp=1.):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
if (im is None) & (c is None):
continue
actual = model.render(out=im, coords=c)
boxed = model.render()
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype():
"""Test different out.dtype for model.render."""
for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]:
for dtype in [np.float64, np.float32, np.complex64]:
im = np.zeros((40, 40), dtype=dtype)
imout = model.render(out=im)
assert imout is im
assert imout.sum() != 0
with pytest.raises(TypeError):
im = np.zeros((40, 40), dtype=np.int32)
imout = model.render(out=im)
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D().n_submodels == 1
assert models.Gaussian2D().n_submodels == 1
def test_compound_deepcopy():
model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._leaflist) != id(new_model._leaflist)
assert id(model[0]) != id(new_model[0])
assert id(model[1]) != id(new_model[1])
assert id(model[2]) != id(new_model[2])
@pytest.mark.skipif('not HAS_SCIPY')
def test_units_with_bounding_box():
points = np.arange(10, 20)
table = np.arange(10) * u.Angstrom
t = models.Tabular1D(points, lookup_table=table)
assert isinstance(t(10), u.Quantity)
assert isinstance(t(10, with_bounding_box=True), u.Quantity)
assert_quantity_allclose(t(10), t(10, with_bounding_box=True))
RENAMED_MODEL = models.Gaussian1D.rename('CustomGaussian')
MODEL_RENAME_CODE = """
from astropy.modeling.models import Gaussian1D
print(repr(Gaussian1D))
print(repr(Gaussian1D.rename('CustomGaussian')))
""".strip()
MODEL_RENAME_EXPECTED = b"""
<class 'astropy.modeling.functional_models.Gaussian1D'>
Name: Gaussian1D
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
<class '__main__.CustomGaussian'>
Name: CustomGaussian (Gaussian1D)
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
""".strip()
def test_rename_path(tmpdir):
# Regression test for a bug that caused the path to the class to be
# incorrect in a renamed model's __repr__.
assert repr(RENAMED_MODEL).splitlines()[0] == "<class 'astropy.modeling.tests.test_core.CustomGaussian'>" # noqa: E501
# Make sure that when called from a user script, the class name includes
# __main__.
env = os.environ.copy()
paths = [os.path.dirname(astropy.__path__[0])] + sys.path
env['PYTHONPATH'] = os.pathsep.join(paths)
script = tmpdir.join('rename.py').strpath
with open(script, 'w') as f:
f.write(MODEL_RENAME_CODE)
output = subprocess.check_output([sys.executable, script], env=env)
assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines()
@pytest.mark.parametrize('model_class',
[models.Gaussian1D, models.Polynomial1D,
models.Shift, models.Tabular1D])
def test_rename_1d(model_class):
new_model = model_class.rename(name='Test1D')
assert new_model.name == 'Test1D'
@pytest.mark.parametrize('model_class',
[models.Gaussian2D, models.Polynomial2D, models.Tabular2D])
def test_rename_2d(model_class):
new_model = model_class.rename(name='Test2D')
assert new_model.name == 'Test2D'
def test_fix_inputs_integer():
"""
Tests that numpy integers can be passed as dictionary keys to fix_inputs
Issue #11358
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {1: 22})
assert mf(1) == (1, 22)
mf_int32 = models.fix_inputs(m, {np.int32(1): 33})
assert mf_int32(1) == (1, 33)
mf_int64 = models.fix_inputs(m, {np.int64(1): 44})
assert mf_int64(1) == (1, 44)
def test_fix_inputs_empty_dict():
"""
    Tests that an empty dictionary can be passed to fix_inputs
Issue #11355
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {})
assert mf(1, 2) == (1, 2)
def test_rename_inputs_outputs():
g2 = models.Gaussian2D(10, 2, 3, 1, 2)
assert g2.inputs == ("x", "y")
assert g2.outputs == ("z",)
with pytest.raises(ValueError):
g2.inputs = ("w", )
with pytest.raises(ValueError):
g2.outputs = ("w", "e")
def test__prepare_output_single_model():
model = models.Gaussian1D()
# No broadcast
assert (np.array([1, 2]) ==
model._prepare_output_single_model(np.array([1, 2]), None)).all()
# Broadcast to scalar
assert 1 == model._prepare_output_single_model(np.array([1]), ())
assert 2 == model._prepare_output_single_model(np.asanyarray(2), ())
# Broadcast reshape
output = np.array([[1, 2, 3],
[4, 5, 6]])
reshape = np.array([[1, 2],
[3, 4],
[5, 6]])
assert (output == model._prepare_output_single_model(output, (2, 3))).all()
assert (reshape == model._prepare_output_single_model(output, (3, 2))).all()
# Broadcast reshape scalar
assert 1 == model._prepare_output_single_model(np.array([1]), (1, 2))
assert 2 == model._prepare_output_single_model(np.asanyarray(2), (3, 4))
    # Failed broadcast: the output is returned unchanged
assert (output == model._prepare_output_single_model(output, (1, 2))).all()
assert (output == model._prepare_output_single_model(output, (3, 4))).all()
def test_prepare_outputs_mixed_broadcast():
"""
    Tests that _prepare_outputs_single_model does not fail when a smaller
    array is passed as the first input but the output is broadcast to a
    larger array.
Issue #10170
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model([1, 2], 3)
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])
output = model(4, [5, 6])
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.8146473164114145, 0.7371233743916278])
def test_prepare_outputs_complex_reshape():
x = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]])
y = np.array([[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30]])
m = models.Identity(3) | models.Mapping((2, 1, 0))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((2, 1), n_inputs=3)
output = mf(1, 2)
assert output == (22, 2, 1)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
m = models.Identity(3) | models.Mapping((0, 1, 2))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((0, 1), n_inputs=3)
output = mf(1, 2)
assert output == (1, 2, 22)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
def test_prepare_outputs_single_entry_vector():
"""
    jwst and gwcs both require that single-entry vectors produce
    single-entry output vectors, not scalars. This tests for that behavior.
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model(np.array([1]), np.array([2]))
assert output.shape == (1,)
np.testing.assert_allclose(output, [0.9500411305585278])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings('ignore: Using a non-tuple')
def test_prepare_outputs_sparse_grid():
"""
Test to show that #11060 has been solved.
"""
shape = (3, 3)
    data = np.arange(np.prod(shape)).reshape(shape) * u.m / u.s
points_unit = u.pix
points = [np.arange(size) * points_unit for size in shape]
kwargs = {
'bounds_error': False,
'fill_value': np.nan,
'method': 'nearest',
}
transform = models.Tabular2D(points, data, **kwargs)
truth = np.array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]) * u.m / u.s
points = np.meshgrid(np.arange(3), np.arange(3), indexing='ij', sparse=True)
x = points[0] * u.pix
y = points[1] * u.pix
value = transform(x, y)
assert (value == truth).all()
points = np.meshgrid(np.arange(3), np.arange(3), indexing='ij', sparse=False) * u.pix
value = transform(*points)
assert (value == truth).all()
def test_coerce_units():
model = models.Polynomial1D(1, c0=1, c1=2)
with pytest.raises(u.UnitsError):
model(u.Quantity(10, u.m))
with_input_units = model.coerce_units({"x": u.m})
result = with_input_units(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_input_units_tuple = model.coerce_units((u.m,))
result = with_input_units_tuple(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_return_units = model.coerce_units(return_units={"y": u.s})
result = with_return_units(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_return_units_tuple = model.coerce_units(return_units=(u.s,))
result = with_return_units_tuple(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_both = model.coerce_units({"x": u.m}, {"y": u.s})
result = with_both(u.Quantity(10, u.m))
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with pytest.raises(ValueError, match=r"input_units keys.*do not match model inputs"):
model.coerce_units({"q": u.m})
with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"):
model.coerce_units((u.m, u.s))
model_with_existing_input_units = models.BlackBody()
with pytest.raises(ValueError,
match=r"Cannot specify input_units for model with existing input units"):
model_with_existing_input_units.coerce_units({"x": u.m})
with pytest.raises(ValueError, match=r"return_units keys.*do not match model outputs"):
model.coerce_units(return_units={"q": u.m})
with pytest.raises(ValueError, match=r"return_units length does not match n_outputs"):
model.coerce_units(return_units=(u.m, u.s))
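# For orientation, the coerce_units round-trip in one line (values mirror the
# assertions above; this is an illustrative sketch, not an extra test):
#     m = models.Polynomial1D(1, c0=1, c1=2).coerce_units({"x": u.m},
#                                                         return_units={"y": u.s})
#     m(10 * u.m)   # -> 21.0 s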
def test_bounding_box_general_inverse():
model = NonFittableModel(42.5)
with pytest.raises(NotImplementedError):
model.bounding_box
model.bounding_box = ()
assert model.bounding_box.bounding_box() == ()
model.inverse = NonFittableModel(3.14)
inverse_model = model.inverse
with pytest.raises(NotImplementedError):
inverse_model.bounding_box
def test__add_special_operator():
sop_name = 'name'
sop = 'value'
    key = _add_special_operator(sop_name, sop)
assert key[0] == sop_name
assert key[1] == SPECIAL_OPERATORS._unique_id
assert key in SPECIAL_OPERATORS
assert SPECIAL_OPERATORS[key] == sop
def test_print_special_operator_CompoundModel(capsys):
"""
Test that issue #11310 has been fixed
"""
model = convolve_models(models.Sersic2D(), models.Gaussian2D())
with astropy.conf.set_temp('max_width', 80):
assert str(model) == (
"Model: CompoundModel\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"Expression: convolve_fft (([0]), ([1]))\n"
"Components: \n"
" [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., "
"x_0=0., y_0=0., ellip=0., theta=0.)>\n"
"\n"
" [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., "
"x_stddev=1., y_stddev=1., theta=0.)>\n"
"Parameters:\n"
" amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n"
" ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n"
" 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0"
)
def test__validate_input_shape():
model = models.Gaussian1D()
model._n_models = 2
_input = np.array([[1, 2, 3],
[4, 5, 6]])
# Successful validation
assert model._validate_input_shape(_input, 0, model.inputs, 1, False) == (2, 3)
# Fail number of axes
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, model.inputs, 2, True)
assert str(err.value) == "For model_set_axis=2, all inputs must be at least 3-dimensional."
# Fail number of models (has argname)
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, model.inputs, 1, True)
assert str(err.value) == ("Input argument 'x' does not have the correct dimensions in "
"model_set_axis=1 for a model set with n_models=2.")
# Fail number of models (no argname)
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, [], 1, True)
assert str(err.value) == ("Input argument '0' does not have the correct dimensions "
"in model_set_axis=1 for a model set with n_models=2.")
def test__validate_input_shapes():
model = models.Gaussian1D()
model._n_models = 2
inputs = [mk.MagicMock() for _ in range(3)]
argnames = mk.MagicMock()
model_set_axis = mk.MagicMock()
all_shapes = [mk.MagicMock() for _ in inputs]
# Successful validation
with mk.patch.object(Model, '_validate_input_shape',
autospec=True, side_effect=all_shapes) as mkValidate:
with mk.patch.object(core, 'check_broadcast',
autospec=True) as mkCheck:
assert mkCheck.return_value == model._validate_input_shapes(inputs, argnames,
model_set_axis)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
# Fail check_broadcast
with mk.patch.object(Model, '_validate_input_shape',
autospec=True, side_effect=all_shapes) as mkValidate:
with mk.patch.object(core, 'check_broadcast',
autospec=True, return_value=None) as mkCheck:
with pytest.raises(ValueError) as err:
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert str(err.value) == "All inputs must have identical shapes or must be scalars."
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == [
mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)
]
def test__remove_axes_from_shape():
model = models.Gaussian1D()
# len(shape) == 0
assert model._remove_axes_from_shape((), mk.MagicMock()) == ()
# axis < 0
assert model._remove_axes_from_shape((1, 2, 3), -1) == (1, 2)
assert model._remove_axes_from_shape((1, 2, 3), -2) == (1, 3)
assert model._remove_axes_from_shape((1, 2, 3), -3) == (2, 3)
# axis >= len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 3) == ()
assert model._remove_axes_from_shape((1, 2, 3), 4) == ()
# 0 <= axis < len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 0) == (2, 3)
assert model._remove_axes_from_shape((1, 2, 3), 1) == (3,)
assert model._remove_axes_from_shape((1, 2, 3), 2) == ()
def test_get_bounding_box():
model = models.Const2D(2)
# No with_bbox
assert model.get_bounding_box(False) is None
# No bounding_box
with pytest.raises(NotImplementedError):
model.bounding_box
assert model.get_bounding_box(True) is None
# Normal bounding_box
model.bounding_box = ((0, 1), (0, 1))
assert not isinstance(model.bounding_box, CompoundBoundingBox)
assert model.get_bounding_box(True) == ((0, 1), (0, 1))
# CompoundBoundingBox with no removal
bbox = CompoundBoundingBox.validate(model, {(1,): ((-1, 0), (-1, 0)), (2,): ((0, 1), (0, 1))},
selector_args=[('y', False)])
model.bounding_box = bbox
assert isinstance(model.bounding_box, CompoundBoundingBox)
    # with_bbox=True returns the whole compound bounding box
assert model.get_bounding_box(True) == bbox
    # Passing a selector tuple as with_bbox returns the matching member box
assert model.get_bounding_box((1,)) == ((-1, 0), (-1, 0))
assert model.get_bounding_box((2,)) == ((0, 1), (0, 1))
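# CompoundBoundingBox evaluation below: a selector key can be passed explicitly
# through `with_bounding_box`, or inferred from the input value itself when
# `with_bounding_box=True`; a value that matches no key raises RuntimeError.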
def test_compound_bounding_box():
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox1 = CompoundBoundingBox.validate(model, {(1,): (-1, 0), (2,): (0, 1)},
selector_args=[('x', False)])
bbox2 = CompoundBoundingBox.validate(model, {(-0.5,): (-1, 0), (0.5,): (0, 1)},
selector_args=[('x', False)])
# Using with_bounding_box to pass a selector
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=(1,)) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=(2,)) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
    # with_bounding_box=True: the selector is taken from the input value itself
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError):
model(0, with_bounding_box=True)
model1 = models.Gaussian1D()
truth1 = models.Gaussian1D()
model2 = models.Const1D(2)
truth2 = models.Const1D(2)
model = model1 + model2
truth = truth1 + truth2
assert isinstance(model, CompoundModel)
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=1) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=2) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError):
model(0, with_bounding_box=True)
def test_bind_bounding_box():
model = models.Polynomial2D(3)
bbox = ((-1, 1), (-2, 2))
bind_bounding_box(model, bbox)
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box['x'] == (-2, 2)
assert model.bounding_box['y'] == (-1, 1)
bind_bounding_box(model, bbox, order='F')
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box['x'] == (-1, 1)
assert model.bounding_box['y'] == (-2, 2)
def test_bind_compound_bounding_box_using_with_bounding_box_select():
"""
    This demonstrates how to bind multiple bounding_boxes that are
    selectable via `with_bounding_box`; note that there must be a
    fall-back to the implicit selector.
"""
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox = (0, 1)
with pytest.raises(AttributeError):
bind_compound_bounding_box(model, bbox, 'x')
bbox = {0: (-1, 0), 1: (0, 1)}
bind_compound_bounding_box(model, bbox, [('x', False)])
# No bounding box
assert model(-0.5) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0) == truth(0)
assert model(1) == truth(1)
    # `with_bounding_box` supplies the selector explicitly; `-0.5` is not a key
assert model(-0.5, with_bounding_box=0) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=1))
    # `with_bounding_box` supplies the selector explicitly; `0.5` is not a key
assert model(0.5, with_bounding_box=1) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(0,)))
# Fall back onto implicit selector
assert model(0, with_bounding_box=True) == truth(0)
assert model(1, with_bounding_box=True) == truth(1)
    # Falling back on the implicit selector fails: there is no bounding_box for 0.5
with pytest.raises(RuntimeError):
model(0.5, with_bounding_box=True)
# Override implicit selector
assert np.isnan(model(1, with_bounding_box=0))
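# fix_inputs can also attach a compound bounding box keyed on the fixed input
# values; `selector_args` may identify the fixed input by name or by position.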
def test_fix_inputs_compound_bounding_box():
base_model = models.Gaussian2D(1, 2, 3, 4, 5)
bbox = {2.5: (-1, 1), 3.14: (-7, 3)}
model = fix_inputs(base_model, {'y': 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'y': 2.5}, bounding_boxes=bbox, selector_args=(('y', True),))
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox, selector_args=(('x', True),))
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox, selector_args=((0, True),))
assert model.bounding_box == (-1, 1)
base_model = models.Identity(4)
bbox = {(2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1))}
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox,
selector_args=(('x0', True), ('x1', True)))
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox,
selector_args=((0, True), (1, True)))
assert model.bounding_box == ((-1, 1), (-3, 3))
def test_model_copy_with_bounding_box():
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5)), order='F')
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
        assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)),
order='F')
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
        assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_model_copy_with_compound_bounding_box():
model = models.Polynomial2D(2)
bbox = {(0,): (-0.5, 1047.5),
(1,): (-0.5, 3047.5)}
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('x', True)], order='F')
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(model.bounding_box.selector_args)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[index]
assert interval == interval_copy
            assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_with_compound_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('slit_id', True)], order='F')
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() is None
assert model.get_bounding_box() is None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(model.bounding_box.selector_args)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[index]
assert interval == interval_copy
            assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() is None
assert model1.get_bounding_box() is None
def test_compound_model_copy_user_attribute():
"""Regression test for issue #12370"""
model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1)
model.xname = 'x_mean' # user-defined attribute
assert hasattr(model, 'xname')
assert model.xname == 'x_mean'
model_copy = model.copy()
    model_copy.xname  # would raise AttributeError before the #12370 fix
assert hasattr(model_copy, 'xname')
assert model_copy.xname == 'x_mean'
def test_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Gaussian2D()
bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order='F')
model.bounding_box = bbox
x = np.array([-0.5, 0.5])
y = 0
    # Everything works when it's all inside the bounding box
assert (model(x, y) == (model(x, y, with_bounding_box=True))).all()
def test_compound_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)),
order='F')
model.bounding_box = bbox
x = np.array([1000, 1001])
y = np.array([2000, 2001])
slit_id = 0
    # Everything works when it's all inside the bounding box
value0 = model(x, y, slit_id)
value1 = model(x, y, slit_id, with_bounding_box=True)
assert_equal(value0, value1)
def test_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,))
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
def test_compound_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,)) | models.Shift(1)
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
def test_bounding_box_pass_with_ignored():
"""Test the possiblity of setting ignored variables in bounding box"""
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=['y'])
model.bounding_box = bbox
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
model = models.Polynomial2D(2)
bind_bounding_box(model, (-1, 1), ignored=['y'])
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
def test_compound_bounding_box_pass_with_ignored():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = {(0,): (-0.5, 1047.5),
(1,): (-0.5, 2047.5), }
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('slit_id', True)],
ignored=['y'], order='F')
model.bounding_box = cbbox
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bind_compound_bounding_box(model, bbox, selector_args=[('slit_id', True)],
ignored=['y'], order='F')
assert model.bounding_box == cbbox
@pytest.mark.parametrize('int_type', [int, np.int32, np.int64, np.uint32, np.uint64])
def test_model_integer_indexing(int_type):
"""Regression for PR 12561; verify that compound model components
can be accessed by integer index"""
gauss = models.Gaussian2D()
airy = models.AiryDisk2D()
compound = gauss + airy
assert compound[int_type(0)] == gauss
assert compound[int_type(1)] == airy
def test_model_string_indexing():
"""Regression for PR 12561; verify that compound model components
can be accessed by indexing with model name"""
gauss = models.Gaussian2D()
gauss.name = 'Model1'
airy = models.AiryDisk2D()
airy.name = 'Model2'
compound = gauss + airy
assert compound['Model1'] == gauss
assert compound['Model2'] == airy
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for spline models and fitters"""
import unittest.mock as mk
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling.core import FittableModel, ModelDefinitionError
from astropy.modeling.fitting import (
SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter)
from astropy.modeling.parameters import Parameter
from astropy.modeling.spline import Spline1D, _Spline, _SplineFitter
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
# pylint: disable=invalid-name
from astropy.utils.exceptions import AstropyUserWarning
npts = 50
nknots = 10
np.random.seed(42)
test_w = np.random.rand(npts)
test_t = [-1, 0, 1]
noise = np.random.randn(npts)
degree_tests = [1, 2, 3, 4, 5]
weight_tests = [None, test_w]
smoothing_tests = [None, 0.01]
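# Parameter grids shared by the parametrized fitter tests below: spline degrees
# 1 through 5, optional per-point weights, and optional smoothing factors.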
class TestSpline:
def setup_class(self):
self.num_opt = 3
self.optional_inputs = {f'test{i}': mk.MagicMock() for i in range(self.num_opt)}
self.extra_kwargs = {f'new{i}': mk.MagicMock() for i in range(self.num_opt)}
class Spline(_Spline):
optional_inputs = {'test': 'test'}
def _init_parameters(self):
super()._init_parameters()
def _init_data(self, knots, coeffs, bounds=None):
super()._init_data(knots, coeffs, bounds=bounds)
self.Spline = Spline
def test___init__(self):
# empty spline
spl = self.Spline()
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
assert not hasattr(spl, 'degree')
# Call _init_spline
with mk.patch.object(_Spline, '_init_spline',
autospec=True) as mkInit:
# No call (knots=None)
spl = self.Spline()
assert mkInit.call_args_list == []
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds)
assert mkInit.call_args_list == [mk.call(spl, knots, coeffs, bounds)]
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
# Coeffs but no knots
with pytest.raises(ValueError) as err:
self.Spline(coeffs=mk.MagicMock())
assert str(err.value) == "If one passes a coeffs vector one needs to also pass knots!"
def test_param_names(self):
# no parameters
spl = self.Spline()
assert spl.param_names == ()
knot_names = tuple([mk.MagicMock() for _ in range(3)])
spl._knot_names = knot_names
assert spl.param_names == knot_names
coeff_names = tuple([mk.MagicMock() for _ in range(3)])
spl._coeff_names = coeff_names
assert spl.param_names == knot_names + coeff_names
def test__optional_arg(self):
spl = self.Spline()
assert spl._optional_arg('test') == '_test'
def test__create_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert hasattr(spl, attribute)
assert getattr(spl, attribute) is None
with pytest.raises(ValueError,
match=r"Optional argument .* already exists in this class!"):
spl._create_optional_inputs()
def test__intercept_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
assert new_kwargs == self.extra_kwargs
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
new_kwargs = spl._intercept_optional_inputs(**kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is not None
assert getattr(spl, attribute) == kwargs[arg]
assert getattr(spl, attribute) != value
assert arg not in new_kwargs
assert new_kwargs == self.extra_kwargs
assert kwargs != self.extra_kwargs
with pytest.raises(RuntimeError,
match=r".* has already been set, something has gone wrong!"):
spl._intercept_optional_inputs(**kwargs)
def test_evaluate(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
spl = Spline()
# No options passed in and No options set
new_kwargs = spl.evaluate(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
assert new_kwargs[arg] == value
for arg, value in self.extra_kwargs.items():
assert new_kwargs[arg] == value
assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs))
# No options passed in and Options set
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**kwargs)
new_kwargs = spl.evaluate(**self.extra_kwargs)
assert new_kwargs == kwargs
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
# Options passed in
set_kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
            set_kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**set_kwargs)
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
assert set_kwargs != kwargs
new_kwargs = spl.evaluate(**kwargs)
assert new_kwargs == kwargs
def test___call__(self):
spl = self.Spline()
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, "_intercept_optional_inputs",
autospec=True, return_value=new_kwargs) as mkIntercept:
with mk.patch.object(FittableModel, "__call__",
autospec=True) as mkCall:
assert mkCall.return_value == spl(*args, **kwargs)
assert mkCall.call_args_list == [mk.call(spl, *args, **new_kwargs)]
assert mkIntercept.call_args_list == [mk.call(spl, **kwargs)]
def test__create_parameter(self):
np.random.seed(37)
base_vec = np.random.random(20)
test = base_vec.copy()
fixed_test = base_vec.copy()
class Spline(self.Spline):
@property
def test(self):
return test
@property
def fixed_test(self):
return fixed_test
spl = Spline()
assert (spl.test == test).all()
assert (spl.fixed_test == fixed_test).all()
for index in range(20):
name = f"test_name{index}"
spl._create_parameter(name, index, 'test')
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is False
assert param.value == test[index] == spl.test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.test[index] == new_set
assert spl.test[index] != base_vec[index]
new_get = np.random.random()
spl.test[index] = new_get
assert param.value == new_get
assert param.value != new_set
for index in range(20):
name = f"fixed_test_name{index}"
spl._create_parameter(name, index, 'fixed_test', True)
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is True
assert param.value == fixed_test[index] == spl.fixed_test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.fixed_test[index] == new_set
assert spl.fixed_test[index] != base_vec[index]
new_get = np.random.random()
spl.fixed_test[index] = new_get
assert param.value == new_get
assert param.value != new_set
def test__create_parameters(self):
np.random.seed(37)
test = np.random.random(20)
class Spline(self.Spline):
@property
def test(self):
return test
spl = Spline()
fixed = mk.MagicMock()
with mk.patch.object(_Spline, '_create_parameter',
autospec=True) as mkCreate:
params = spl._create_parameters("test_param", "test", fixed)
assert params == tuple([f"test_param{idx}" for idx in range(20)])
assert mkCreate.call_args_list == [
mk.call(spl, f"test_param{idx}", idx, 'test', fixed) for idx in range(20)
]
def test__init_parameters(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_parameters()
assert str(err.value) == "This needs to be implemented"
def test__init_data(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "This needs to be implemented"
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "This needs to be implemented"
def test__init_spline(self):
spl = self.Spline()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
with mk.patch.object(_Spline, "_init_parameters",
autospec=True) as mkParameters:
with mk.patch.object(_Spline, "_init_data",
autospec=True) as mkData:
main = mk.MagicMock()
main.attach_mock(mkParameters, 'parameters')
main.attach_mock(mkData, 'data')
spl._init_spline(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.data(spl, knots, coeffs, bounds=bounds),
mk.call.parameters(spl)
]
def test__init_tck(self):
spl = self.Spline()
assert spl._c is None
assert spl._t is None
assert spl._degree is None
spl = self.Spline(degree=4)
assert spl._c is None
assert spl._t is None
assert spl._degree == 4
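# The tests below exercise the concrete Spline1D model and its fitters against
# scipy's spline classes, so the whole class is skipped when scipy is missing.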
@pytest.mark.skipif('not HAS_SCIPY')
class TestSpline1D:
def setup_class(self):
def func(x, noise=0):
return np.exp(-x**2) + 0.1*noise
self.x = np.linspace(-3, 3, npts)
self.y = func(self.x, noise)
self.truth = func(self.x)
arg_sort = np.argsort(self.x)
np.random.shuffle(arg_sort)
self.x_s = self.x[arg_sort]
self.y_s = func(self.x_s, noise[arg_sort])
self.npts_out = 1000
self.xs = np.linspace(-3, 3, self.npts_out)
self.t = np.linspace(-3, 3, nknots)[1:-1]
def check_parameter(self, spl, base_name, name, index, value, fixed):
assert base_name in name
assert index == int(name.split(base_name)[-1])
knot_name = f"{base_name}{index}"
assert knot_name == name
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.name == name
assert param.value == value(index)
assert param.model == spl
assert param.fixed is fixed
def check_parameters(self, spl, params, base_name, value, fixed):
for idx, name in enumerate(params):
self.check_parameter(spl, base_name, name, idx, value, fixed)
def update_parameters(self, spl, knots, value):
for name in knots:
param = getattr(spl, name)
param.value = value
assert param.value == value
def test___init__with_no_knot_information(self):
spl = Spline1D()
assert spl._degree == 3
assert spl._user_knots is False
assert spl._t is None
assert spl._c is None
assert spl._nu is None
# Check no parameters created
assert len(spl._knot_names) == 0
assert len(spl._coeff_names) == 0
def test___init__with_number_of_knots(self):
spl = Spline1D(knots=10)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is False
assert spl._nu is None
# Check vector data
assert len(spl._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert len(spl._c) == 18
assert (spl._c == np.zeros(18)).all()
# Check all parameter names created:
assert len(spl._knot_names) == 18
assert len(spl._coeff_names) == 18
# Check knot values:
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values:
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_full_custom_knots(self):
t = 17*np.arange(20) - 32
spl = Spline1D(knots=t)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == np.zeros(20)).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_interior_custom_knots(self):
t = np.arange(1, 20)
spl = Spline1D(knots=t, bounds=[0, 20])
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert len(spl._t) == 27
assert (spl._t[4:-4] == t).all()
assert (spl._t[:4] == 0).all()
assert (spl._t[-4:] == 20).all()
assert len(spl._c) == 27
assert (spl._c == np.zeros(27)).all()
# Check knot values:
def value0(idx):
if idx < 4:
return 0
elif idx >= 19 + 4:
return 20
else:
return t[idx-4]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_user_knots_and_coefficients(self):
t = 17*np.arange(20) - 32
c = np.linspace(-1, 1, 20)
spl = Spline1D(knots=t, coeffs=c)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == c).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__errors(self):
# Bad knot type
knots = 3.5
with pytest.raises(ValueError) as err:
Spline1D(knots=knots)
assert str(err.value) == f"Knots: {knots} must be iterable or value"
# Not enough knots
for idx in range(8):
with pytest.raises(ValueError) as err:
Spline1D(knots=np.arange(idx))
assert str(err.value) == "Must have at least 8 knots."
# Bad scipy spline
t = np.arange(20)[::-1]
with pytest.raises(ValueError):
Spline1D(knots=t)
def test_parameter_array_link(self):
spl = Spline1D(10)
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check knot vector -> knot parameter link
t = np.arange(18)
spl._t = t.copy()
def value1(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value1, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl, spl._knot_names, 3)
assert (spl._t[:] == 3).all()
# Check coeff base values
def value2(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value2, False)
# Check coeff vector -> coeff parameter link
c = 5 * np.arange(18) + 18
spl._c = c.copy()
def value3(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value3, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl, spl._coeff_names, 4)
assert (spl._c[:] == 4).all()
def test_two_splines(self):
spl0 = Spline1D(knots=10)
spl1 = Spline1D(knots=15, degree=2)
assert spl0._degree == 3
assert len(spl0._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl0._t == t).all()
assert len(spl0._c) == 18
assert (spl0._c == np.zeros(18)).all()
assert spl1._degree == 2
assert len(spl1._t) == 21
t = np.zeros(21)
t[-3:] = 1
assert (spl1._t == t).all()
assert len(spl1._c) == 21
assert (spl1._c == np.zeros(21)).all()
# Check all knot names created
assert len(spl0._knot_names) == 18
assert len(spl1._knot_names) == 21
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl0, spl0._knot_names, "knot", value0, True)
def value1(idx):
if idx < 21 - 3:
return 0
else:
return 1
self.check_parameters(spl1, spl1._knot_names, "knot", value1, True)
# Check knot vector -> knot parameter link
t0 = 7 * np.arange(18) + 27
t1 = 11 * np.arange(21) + 19
spl0._t[:] = t0.copy()
spl1._t[:] = t1.copy()
def value2(idx):
return t0[idx]
self.check_parameters(spl0, spl0._knot_names, "knot", value2, True)
def value3(idx):
return t1[idx]
self.check_parameters(spl1, spl1._knot_names, "knot", value3, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl0, spl0._knot_names, 3)
self.update_parameters(spl1, spl1._knot_names, 4)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
# Check all coeff names created
assert len(spl0._coeff_names) == 18
assert len(spl1._coeff_names) == 21
# Check coeff base values
def value4(idx):
return 0
self.check_parameters(spl0, spl0._coeff_names, "coeff", value4, False)
self.check_parameters(spl1, spl1._coeff_names, "coeff", value4, False)
# Check coeff vector -> coeff parameter link
c0 = 17 * np.arange(18) + 14
c1 = 37 * np.arange(21) + 47
spl0._c[:] = c0.copy()
spl1._c[:] = c1.copy()
def value5(idx):
return c0[idx]
self.check_parameters(spl0, spl0._coeff_names, "coeff", value5, False)
def value6(idx):
return c1[idx]
self.check_parameters(spl1, spl1._coeff_names, "coeff", value6, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl0, spl0._coeff_names, 5)
self.update_parameters(spl1, spl1._coeff_names, 6)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
assert (spl0._c[:] == 5).all()
assert (spl1._c[:] == 6).all()
def test__knot_names(self):
# no parameters
spl = Spline1D()
assert spl._knot_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._knot_names == tuple(knot_names)
def test__coeff_names(self):
# no parameters
spl = Spline1D()
assert spl._coeff_names == ()
# some parameters
coeff_names = [f"coeff{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._coeff_names == tuple(coeff_names)
def test_param_names(self):
# no parameters
spl = Spline1D()
assert spl.param_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
coeff_names = [f"coeff{idx}" for idx in range(18)]
param_names = knot_names + coeff_names
spl = Spline1D(10)
assert spl.param_names == tuple(param_names)
def test_t(self):
# no parameters
spl = Spline1D()
# test get
assert spl._t is None
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
# test set
with pytest.raises(ValueError) as err:
spl.t = mk.MagicMock()
assert str(err.value) == "The model parameters must be initialized before setting knots."
# with parameters
spl = Spline1D(10)
# test get
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert (spl.t == t).all()
# test set
spl.t = (np.arange(18) + 15)
assert (spl._t == (np.arange(18) + 15)).all()
assert (spl.t == (np.arange(18) + 15)).all()
assert (spl.t != t).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.t = np.arange(idx)
assert str(err.value) == "There must be exactly as many knots as previously defined."
def test_c(self):
# no parameters
spl = Spline1D()
# test get
assert spl._c is None
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
# test set
with pytest.raises(ValueError) as err:
spl.c = mk.MagicMock()
assert str(err.value) == "The model parameters must be initialized before setting coeffs."
# with parameters
spl = Spline1D(10)
# test get
assert (spl._c == np.zeros(18)).all()
assert (spl.c == np.zeros(18)).all()
# test set
spl.c = (np.arange(18) + 15)
assert (spl._c == (np.arange(18) + 15)).all()
assert (spl.c == (np.arange(18) + 15)).all()
assert (spl.c != np.zeros(18)).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.c = np.arange(idx)
assert str(err.value) == "There must be exactly as many coeffs as previously defined."
def test_degree(self):
# default degree
spl = Spline1D()
# test get
assert spl._degree == 3
assert spl.degree == 3
        # non-default degree
spl = Spline1D(degree=2)
# test get
assert spl._degree == 2
assert spl.degree == 2
def test__initialized(self):
# no parameters
spl = Spline1D()
assert spl._initialized is False
# with parameters
spl = Spline1D(knots=10, degree=2)
assert spl._initialized is True
def test_tck(self):
# no parameters
spl = Spline1D()
# test get
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
assert spl.degree == 3
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
spl.tck = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
# test get
t = np.zeros(16)
t[-3:] = 1
assert (spl.t == t).all()
assert (spl.c == np.zeros(16)).all()
assert spl.degree == 2
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
t = 5*np.arange(16) + 11
c = 7*np.arange(16) + 13
k = 2
spl.tck = (t, c, k)
assert (spl.t == t).all()
assert (spl.c == c).all()
assert spl.degree == k
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# Error
with pytest.raises(ValueError) as err:
spl.tck = (t, c, 4)
assert str(err.value) == "tck has incompatible degree!"
def test_bspline(self):
from scipy.interpolate import BSpline
# no parameters
spl = Spline1D()
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
def value0(idx):
return t[idx]
def value1(idx):
return c[idx]
# set (bspline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
bspline = BSpline(t, c, k)
spl.bspline = bspline
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# set (tuple spline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
spl.bspline = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
def test_knots(self):
# no parameters
spl = Spline1D()
assert spl.knots == []
# with parameters
spl = Spline1D(10)
knots = spl.knots
assert len(knots) == 18
for knot in knots:
assert isinstance(knot, Parameter)
assert hasattr(spl, knot.name)
assert getattr(spl, knot.name) == knot
def test_coeffs(self):
# no parameters
spl = Spline1D()
assert spl.coeffs == []
# with parameters
spl = Spline1D(10)
coeffs = spl.coeffs
assert len(coeffs) == 18
for coeff in coeffs:
assert isinstance(coeff, Parameter)
assert hasattr(spl, coeff.name)
assert getattr(spl, coeff.name) == coeff
def test__init_parameters(self):
spl = Spline1D()
with mk.patch.object(Spline1D, '_create_parameters',
autospec=True) as mkCreate:
spl._init_parameters()
assert mkCreate.call_args_list == [
mk.call(spl, "knot", "t", fixed=True),
mk.call(spl, "coeff", "c")
]
def test__init_bounds(self):
spl = Spline1D()
has_bounds, lower, upper = spl._init_bounds()
assert has_bounds is False
assert (lower == [0, 0, 0, 0]).all()
assert (upper == [1, 1, 1, 1]).all()
assert spl._user_bounding_box is None
has_bounds, lower, upper = spl._init_bounds((-5, 5))
assert has_bounds is True
assert (lower == [-5, -5, -5, -5]).all()
assert (upper == [5, 5, 5, 5]).all()
assert spl._user_bounding_box == (-5, 5)
def test__init_knots(self):
np.random.seed(19)
lower = np.random.random(4)
upper = np.random.random(4)
# Integer
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._t is None
spl._init_knots(10, mk.MagicMock(), lower, upper)
t = np.concatenate((lower, np.zeros(10), upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, True, lower, upper)
t = np.concatenate((lower, knots, upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with no bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, False, lower, upper)
assert (spl._t == knots).all()
assert mkBspline.call_args_list == [mk.call()]
# error
for num in range(8):
knots = np.random.random(num)
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(knots, False, lower, upper)
assert str(err.value) == "Must have at least 8 knots."
# Error
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(0.5, False, lower, upper)
assert str(err.value) == "Knots: 0.5 must be iterable or value"
def test__init_coeffs(self):
np.random.seed(492)
# No coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._c is None
spl._t = [1, 2, 3, 4]
spl._init_coeffs()
assert (spl._c == [0, 0, 0, 0]).all()
assert mkBspline.call_args_list == [mk.call()]
# Some coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
coeffs = np.random.random(10)
spl = Spline1D()
assert spl._c is None
spl._init_coeffs(coeffs)
assert (spl._c == coeffs).all()
assert mkBspline.call_args_list == [mk.call()]
def test__init_data(self):
spl = Spline1D()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
has_bounds = mk.MagicMock()
lower = mk.MagicMock()
upper = mk.MagicMock()
with mk.patch.object(Spline1D, '_init_bounds', autospec=True,
return_value=(has_bounds, lower, upper)) as mkBounds:
with mk.patch.object(Spline1D, '_init_knots',
autospec=True) as mkKnots:
with mk.patch.object(Spline1D, '_init_coeffs',
autospec=True) as mkCoeffs:
main = mk.MagicMock()
main.attach_mock(mkBounds, 'bounds')
main.attach_mock(mkKnots, 'knots')
main.attach_mock(mkCoeffs, 'coeffs')
spl._init_data(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.bounds(spl, bounds),
mk.call.knots(spl, knots, has_bounds, lower, upper),
mk.call.coeffs(spl, coeffs)
]
def test_evaluate(self):
spl = Spline1D()
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value=new_kwargs) as mkEval:
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
assert mkBspline.return_value.return_value == spl.evaluate(*args, **kwargs)
assert mkBspline.return_value.call_args_list == [mk.call(args[0], **new_kwargs)]
assert mkBspline.call_args_list == [mk.call()]
assert mkEval.call_args_list == [mk.call(spl, *args, **kwargs)]
# Error
for idx in range(5, 8):
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value={'nu': idx}):
with pytest.raises(RuntimeError) as err:
spl.evaluate(*args, **kwargs)
assert str(err.value) == "Cannot evaluate a derivative of order higher than 4"
def check_knots_created(self, spl, k):
def value0(idx):
return self.x[0]
def value1(idx):
return self.x[-1]
for idx in range(k + 1):
name = f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value0, True)
index = len(spl.t) - (k + 1) + idx
name = f"knot{index}"
self.check_parameter(spl, "knot", name, index, value1, True)
def value3(idx):
return spl.t[idx]
assert len(spl._knot_names) == len(spl.t)
for idx, name in enumerate(spl._knot_names):
assert name == f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value3, True)
def check_coeffs_created(self, spl):
def value(idx):
return spl.c[idx]
assert len(spl._coeff_names) == len(spl.c)
for idx, name in enumerate(spl._coeff_names):
assert name == f"coeff{idx}"
self.check_parameter(spl, "coeff", name, idx, value, False)
@staticmethod
def check_base_spline(spl, t, c, k):
"""Check the base spline form"""
if t is None:
assert spl._t is None
else:
assert_allclose(spl._t, t)
if c is None:
assert spl._c is None
else:
assert_allclose(spl._c, c)
assert spl.degree == k
assert spl._bounding_box is None
def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth):
"""Check the spline fit"""
assert_allclose(fit_spl.t, spline._eval_args[0])
assert_allclose(fit_spl.c, spline._eval_args[1])
assert_allclose(fitter.fit_info['spline']._eval_args[0], spline._eval_args[0])
assert_allclose(fitter.fit_info['spline']._eval_args[1], spline._eval_args[1])
# check that _parameters are correct
assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl._parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl._parameters[len(fit_spl.t):], fit_spl.c)
# check that parameters are correct
assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl.parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl.parameters[len(fit_spl.t):], fit_spl.c)
assert_allclose(spline.get_residual(), fitter.fit_info['resid'])
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), fitter.fit_info['spline'](self.x))
assert_allclose(fit_spl(self.x), self.y, atol=atol_fit)
assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth)
def check_bbox(self, spl, fit_spl, fitter, w, **kwargs):
"""Check the spline fit with bbox option"""
bbox = [self.x[0], self.x[-1]]
bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs)
assert bbox_spl.bounding_box == tuple(bbox)
assert_allclose(fit_spl.t, bbox_spl.t)
assert_allclose(fit_spl.c, bbox_spl.c)
def check_knots_warning(self, fitter, knots, k, w, **kwargs):
"""Check that the knots warning is raised"""
spl = Spline1D(knots=knots, degree=k)
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, weights=w, **kwargs)
    @pytest.mark.parametrize('w', weight_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_interpolate_fitter(self, w, k):
fitter = SplineInterpolateFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, None, None, k)
assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline
spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert spline.get_residual() == 0
self.check_spline_fit(fit_spl, spline, fitter, 0, 1)
self.check_bbox(spl, fit_spl, fitter, w)
knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1)
self.check_knots_warning(fitter, knots, k, w)
    @pytest.mark.parametrize('w', weight_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_smoothing_fitter(self, w, k, s):
fitter = SplineSmoothingFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
# test warning
knots = fit_spl.t.copy()
self.check_knots_warning(fitter, knots, k, w, s=s)
    @pytest.mark.parametrize('w', weight_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_exact_knots_fitter(self, w, k):
fitter = SplineExactKnotsFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert_allclose(spline.get_residual(), 0.1, atol=1)
assert_allclose(fitter.fit_info['spline'].get_residual(), 0.1, atol=1)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w)
# Pass knots via fitter function
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# pass no knots
spl = Spline1D(degree=k)
with pytest.raises(RuntimeError) as err:
fitter(spl, self.x, self.y, weights=w)
assert str(err.value) == "No knots have been provided"
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_splrep_fitter_no_knots(self, w, k, s):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, s=s, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_splrep_fitter_with_knots(self, w, k):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, t=knots, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w)
# test warning
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# With no knots present
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
tck = splrep(self.x, self.y, w=w, k=k, t=knots)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, t=knots)
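    # Helper used by the derivative/antiderivative tests below: build a
    # reference scipy BSpline directly via splrep.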
    def generate_spline(self, w=None, bbox=(None, None), k=None, s=None, t=None):
if k is None:
k = 3
from scipy.interpolate import BSpline, splrep
tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1],
k=k, s=s, t=t)
return BSpline(*tck)
def test_derivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
assert_allclose(spl.t, bspline.t)
assert_allclose(spl.c, bspline.c)
assert spl.degree == bspline.k
# 1st derivative
d_bspline = bspline.derivative(nu=1)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))
der = spl.derivative()
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 2
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))
# 2nd derivative
d_bspline = bspline.derivative(nu=2)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))
der = spl.derivative(nu=2)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 1
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))
# 3rd derivative
d_bspline = bspline.derivative(nu=3)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))
der = spl.derivative(nu=3)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 0
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))
# Too many derivatives
for nu in range(4, 9):
with pytest.raises(ValueError) as err:
spl.derivative(nu=nu)
assert str(err.value) == "Must have nu <= 3"
def test_antiderivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
# 1st antiderivative
a_bspline = bspline.antiderivative(nu=1)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))
anti = spl.antiderivative()
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 4
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))
# 2nd antiderivative
a_bspline = bspline.antiderivative(nu=2)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))
anti = spl.antiderivative(nu=2)
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 5
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))
# Too many anti derivatives
for nu in range(3, 9):
with pytest.raises(ValueError) as err:
spl.antiderivative(nu=nu)
assert str(err.value) == ("Supported splines can have max degree 5, "
f"antiderivative degree will be {nu + 3}")
def test__SplineFitter_error(self):
spl = Spline1D()
class SplineFitter(_SplineFitter):
def _fit_method(self, model, x, y, **kwargs):
super()._fit_method(model, x, y, **kwargs)
fitter = SplineFitter()
with pytest.raises(ValueError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "1D model can only have 2 data points."
with pytest.raises(ModelDefinitionError) as err:
fitter(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "Only spline models are compatible with this fitter."
with pytest.raises(NotImplementedError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock())
assert str(err.value) == "This has not been implemented for _SplineFitter."
|
38540592d36d317709a763740e05ffa3cd33a718d7146e1add6081f33d08136a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test separability of models.
"""
import numpy as np
# pylint: disable=invalid-name
import pytest
from numpy.testing import assert_allclose
from astropy.modeling import custom_model, models
from astropy.modeling.core import ModelDefinitionError
from astropy.modeling.models import Mapping
from astropy.modeling.separable import (
_arith_oper, _cdot, _coord_matrix, _cstack, is_separable, separability_matrix)
sh1 = models.Shift(1, name='shift1')
sh2 = models.Shift(2, name='sh2')
scl1 = models.Scale(1, name='scl1')
scl2 = models.Scale(2, name='scl2')
map1 = Mapping((0, 1, 0, 1), name='map1')
map2 = Mapping((0, 0, 1), name='map2')
map3 = Mapping((0, 0), name='map3')
rot = models.Rotation2D(2, name='rotation')
p2 = models.Polynomial2D(1, name='p2')
p22 = models.Polynomial2D(2, name='p22')
p1 = models.Polynomial1D(1, name='p1')
cm_4d_expected = (np.array([False, False, True, True]),
np.array([[True, True, False, False],
[True, True, False, False],
[False, False, True, False],
[False, False, False, True]]))
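# Expected (is_separable, separability_matrix) for the 4-output combinations
# below: Rotation2D couples its two inputs, so its 2x2 block is fully True,
# while each Shift output depends on a single input.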
compound_models = {
'cm1': (map3 & sh1 | rot & sh1 | sh1 & sh2 & sh1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm2': (sh1 & sh2 | rot | map1 | p2 & p22,
(np.array([False, False]),
np.array([[True, True], [True, True]]))
),
'cm3': (map2 | rot & scl1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm4': (sh1 & sh2 | map2 | rot & scl1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm5': (map3 | sh1 & sh2 | scl1 & scl2,
(np.array([False, False]),
np.array([[True], [True]]))
),
'cm7': (map2 | p2 & sh1,
(np.array([False, True]),
np.array([[True, False], [False, True]]))
),
'cm8': (rot & (sh1 & sh2), cm_4d_expected),
'cm9': (rot & sh1 & sh2, cm_4d_expected),
'cm10': ((rot & sh1) & sh2, cm_4d_expected),
'cm11': (rot & sh1 & (scl1 & scl2),
(np.array([False, False, True, True, True]),
np.array([[True, True, False, False, False],
[True, True, False, False, False],
[False, False, True, False, False],
[False, False, False, True, False],
[False, False, False, False, True]]))),
}
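# _coord_matrix(model, pos, noutp) builds a (noutp, n_inputs) dependency
# matrix, placing the model's block at the top ('left') or bottom ('right')
# of the stacked output axis.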
def test_coord_matrix():
c = _coord_matrix(p2, 'left', 2)
assert_allclose(np.array([[1, 1], [0, 0]]), c)
c = _coord_matrix(p2, 'right', 2)
assert_allclose(np.array([[0, 0], [1, 1]]), c)
c = _coord_matrix(p1, 'left', 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(p1, 'left', 1)
assert_allclose(np.array([[1]]), c)
c = _coord_matrix(sh1, 'left', 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(sh1, 'right', 2)
assert_allclose(np.array([[0], [1]]), c)
c = _coord_matrix(sh1, 'right', 3)
assert_allclose(np.array([[0], [0], [1]]), c)
c = _coord_matrix(map3, 'left', 2)
assert_allclose(np.array([[1], [1]]), c)
c = _coord_matrix(map3, 'left', 3)
assert_allclose(np.array([[1], [1], [0]]), c)
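# _cdot implements the '|' (composition) operator by matrix-multiplying the
# operands' coordinate matrices.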
def test_cdot():
result = _cdot(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _cdot(rot, p2)
assert_allclose(result, np.array([[2, 2]]))
result = _cdot(rot, rot)
assert_allclose(result, np.array([[2, 2], [2, 2]]))
result = _cdot(Mapping((0, 0)), rot)
assert_allclose(result, np.array([[2], [2]]))
with pytest.raises(ModelDefinitionError,
match=r"Models cannot be combined with the \"|\" operator; .*"):
_cdot(sh1, map1)
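# _cstack implements the '&' operator by stacking coordinate matrices into a
# block-diagonal layout, one block per operand.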
def test_cstack():
result = _cstack(sh1, scl1)
assert_allclose(result, np.array([[1, 0], [0, 1]]))
result = _cstack(sh1, rot)
assert_allclose(result,
np.array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
)
result = _cstack(rot, sh1)
assert_allclose(result,
np.array([[1, 1, 0],
[1, 1, 0],
[0, 0, 1]])
)
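# Arithmetic operators combine two models elementwise, so every output is
# marked as depending on every input; mismatched shapes are rejected.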
def test_arith_oper():
# Models as inputs
result = _arith_oper(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _arith_oper(rot, rot)
assert_allclose(result, np.array([[1, 1], [1, 1]]))
# ndarray
result = _arith_oper(np.array([[1, 2], [3, 4]]), np.array([[1, 2], [3, 4]]))
assert_allclose(result, np.array([[1, 1], [1, 1]]))
# Error
with pytest.raises(ModelDefinitionError,
match=r"Unsupported operands for arithmetic operator: .*"):
_arith_oper(sh1, map1)
@pytest.mark.parametrize(('compound_model', 'result'), compound_models.values())
def test_separable(compound_model, result):
assert_allclose(is_separable(compound_model), result[0])
assert_allclose(separability_matrix(compound_model), result[1])
def test_custom_model_separable():
@custom_model
def model_a(x):
return x
assert model_a().separable
@custom_model
def model_c(x, y):
return x + y
assert not model_c().separable
assert np.all(separability_matrix(model_c()) == [True, True])
|
71d36a21c997c8ec80f266f289c6e15242dd48b55f9e5e861bd9893fa1d17158 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
# pylint: disable=invalid-name
import functools
import itertools
import unittest.mock as mk
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import FittableModel, Model
from astropy.modeling.parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from astropy.utils.data import get_pkg_data_filename
from . import irafutil
def setter1(val):
return val
def setter2(val, model):
model.do_something(val)
return val * model.p
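# Parameter setters may take one argument (the value) or two (value, model);
# two-argument setters are bound to the owning model when it is attached
# (exercised in test__create_value_wrapper below).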
class SetterModel(FittableModel):
n_inputs = 2
n_outputs = 1
xc = Parameter(default=1, setter=setter1)
yc = Parameter(default=1, setter=setter2)
def do_something(self, v):
pass
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return (x - xc)**2 + (y - yc)**2
class TParModel(Model):
"""
    A toy model to test the parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name='alpha', default=42)
@staticmethod
def evaluate(*args):
pass
def test__tofloat():
# iterable
value = _tofloat([1, 2, 3])
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
value = _tofloat(np.array([1, 2, 3]))
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
with pytest.raises(InputParameterError) as err:
_tofloat('test')
assert str(err.value) == "Parameter of <class 'str'> could not be converted to float"
# quantity
assert _tofloat(1 * u.m) == 1 * u.m
# dimensions/scalar array
value = _tofloat(np.asanyarray(3))
assert isinstance(value, float)
assert value == 3
# A regular number
value = _tofloat(3)
assert isinstance(value, float)
assert value == 3
value = _tofloat(3.0)
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float64(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int64(3))
assert isinstance(value, float)
assert value == 3
# boolean
message = "Expected parameter to be of numerical type, not boolean"
with pytest.raises(InputParameterError) as err:
_tofloat(True)
assert str(err.value) == message
with pytest.raises(InputParameterError) as err:
_tofloat(False)
assert str(err.value) == message
# other
    class Value:
pass
with pytest.raises(InputParameterError) as err:
_tofloat(Value)
assert str(err.value) == "Don't know how to convert parameter of <class 'type'> to float"
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
p = Parameter('alpha', default=1)
assert p.name == 'alpha'
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = 'beta'
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
par = Parameter('alpha', default=42)
num = 42.
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par ** val == num ** val
assert val ** par == val ** num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
# Test inherited models
class M1(Model):
m1a = Parameter(default=1.)
m1b = Parameter(default=5.)
def evaluate():
pass
class M2(M1):
m2c = Parameter(default=11.)
class M3(M2):
m3d = Parameter(default=20.)
def test_parameter_inheritance():
mod = M3()
assert mod.m1a == 1.
assert mod.m1b == 5.
assert mod.m2c == 11.
assert mod.m3d == 20.
for key in ['m1a', 'm1b', 'm2c', 'm3d']:
assert key in mod.__dict__
assert mod.param_names == ('m1a', 'm1b', 'm2c', 'm3d')
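# _param_metrics maps each parameter name to, among other things, its slice
# into the model's flattened parameters array.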
def test_param_metric():
mod = M3()
assert mod._param_metrics['m1a']['slice'] == slice(0, 1)
assert mod._param_metrics['m1b']['slice'] == slice(1, 2)
assert mod._param_metrics['m2c']['slice'] == slice(2, 3)
assert mod._param_metrics['m3d']['slice'] == slice(3, 4)
mod._parameters_to_array()
assert (mod._parameters == np.array([1., 5., 11., 20], dtype=np.float64)).all()
class TestParameters:
def setup_class(self):
"""
Unit tests for parameters
Read an iraf database file created by onedspec.identify. Use the
information to create a 1D Chebyshev model and perform the same fit.
Create also a gaussian model.
"""
test_file = get_pkg_data_filename('data/idcompspec.fits')
f = open(test_file)
lines = f.read()
reclist = lines.split("begin")
f.close()
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields['order'])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30., 40., 50., 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
        parameter (in this case c0) is updated.
"""
self.model.parameters = [0, 0., 0., 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0., 0., 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
tpar = self.model.parameters
tpar[0] = 10.
self.model.parameters = tpar
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
with pytest.raises(InputParameterError):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
        parameter (in this case c0) with a sequence of the wrong size.
"""
with pytest.raises(InputParameterError):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
with pytest.raises(InputParameterError):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test the fitter modifies model.parameters.
Uses an iraf example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
np.testing.assert_allclose(
new_model.parameters,
np.array([4826.1066602783685, 952.8943813407858, 12.641236013982386,
-1.7910672553339604, 0.90252884366711317]),
rtol=10 ** (-2))
    def test_polynomial1d(self):
d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
p1 = models.Polynomial1D(3, **d)
np.testing.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
np.testing.assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
0, 0, 0, 0, 0, 0])
np.testing.assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
np.testing.assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
np.testing.assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
np.testing.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
p2 = models.Polynomial2D(2, **kw)
np.testing.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5,
1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
np.testing.assert_array_equal(sc1.factor.value, [3, 3])
def test_bounds(self):
# Valid __init__
param = Parameter(bounds=(1, 2))
assert param.bounds == (1, 2)
param = Parameter(min=1, max=2)
assert param.bounds == (1, 2)
# Errors __init__
message = ("bounds may not be specified simultaneously with min or max"
" when instantiating Parameter test")
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), min=1, name='test')
assert str(err.value) == message
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), max=2, name='test')
assert str(err.value) == message
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), min=1, max=2, name='test')
assert str(err.value) == message
# Setters
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.bounds == (None, None) == param._bounds
# Set errors
with pytest.raises(TypeError) as err:
param.bounds = ('test', None)
assert str(err.value) == "Min value must be a number or a Quantity"
with pytest.raises(TypeError) as err:
param.bounds = (None, 'test')
assert str(err.value) == "Max value must be a number or a Quantity"
# Set number
param.bounds = (1, 2)
assert param.bounds == (1, 2) == param._bounds
# Set Quantity
param.bounds = (1 * u.m, 2 * u.m)
assert param.bounds == (1, 2) == param._bounds
def test_modify_value(self):
param = Parameter(name='test', default=[1, 2, 3])
assert (param.value == [1, 2, 3]).all()
# Errors
with pytest.raises(InputParameterError) as err:
param[slice(0, 0)] = 2
assert str(err.value) == "Slice assignment outside the parameter dimensions for 'test'"
with pytest.raises(InputParameterError) as err:
param[3] = np.array([5])
assert str(err.value) == "Input dimension 3 invalid for 'test' parameter with dimension 1"
# assignment of a slice
param[slice(0, 2)] = [4, 5]
assert (param.value == [4, 5, 3]).all()
# assignment of a value
param[2] = 6
assert (param.value == [4, 5, 6]).all()
def test__set_unit(self):
param = Parameter(name='test', default=[1, 2, 3])
assert param.unit is None
# No force Error (no existing unit)
with pytest.raises(ValueError) as err:
param._set_unit(u.m)
assert str(err.value) == ("Cannot attach units to parameters that were "
"not initially specified with units")
# Force
param._set_unit(u.m, True)
assert param.unit == u.m
# No force Error (existing unit)
with pytest.raises(ValueError) as err:
param._set_unit(u.K)
assert str(err.value) == ("Cannot change the unit attribute directly, instead change the "
"parameter to a new quantity")
def test_quantity(self):
param = Parameter(name='test', default=[1, 2, 3])
assert param.unit is None
assert param.quantity is None
param = Parameter(name='test', default=[1, 2, 3], unit=u.m)
assert param.unit == u.m
assert (param.quantity == np.array([1, 2, 3]) * u.m).all()
def test_shape(self):
# Array like
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.shape == (4,)
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == "cannot reshape array of size 4 into shape (5,)"
# Reshape success
param.shape = (2, 2)
assert param.shape == (2, 2)
assert (param.value == [[1, 2], [3, 4]]).all()
# Scalar
param = Parameter(name='test', default=1)
assert param.shape == ()
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == "Cannot assign this shape to a scalar quantity"
param.shape = (1,)
# single value
param = Parameter(name='test', default=np.array([1]))
assert param.shape == (1,)
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == "Cannot assign this shape to a scalar quantity"
param.shape = ()
def test_size(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.size == 4
param = Parameter(name='test', default=[1])
assert param.size == 1
param = Parameter(name='test', default=1)
assert param.size == 1
def test_std(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.std is None
assert param._std is None
param.std = 5
assert param.std == 5 == param._std
def test_fixed(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.fixed is False
assert param._fixed is False
# Set error
with pytest.raises(ValueError) as err:
param.fixed = 3
assert str(err.value) == "Value must be boolean"
# Set
param.fixed = True
assert param.fixed is True
assert param._fixed is True
def test_tied(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.tied is False
assert param._tied is False
# Set error
with pytest.raises(TypeError) as err:
param.tied = mk.NonCallableMagicMock()
assert str(err.value) == "Tied must be a callable or set to False or None"
# Set None
param.tied = None
assert param.tied is None
assert param._tied is None
# Set False
param.tied = False
assert param.tied is False
assert param._tied is False
# Set other
tied = mk.MagicMock()
param.tied = tied
assert param.tied == tied == param._tied
def test_validator(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param._validator is None
valid = mk.MagicMock()
param.validator(valid)
assert param._validator == valid
with pytest.raises(ValueError) as err:
param.validator(mk.NonCallableMagicMock())
assert str(err.value) == ("This decorator method expects a callable.\n"
"The use of this method as a direct validator is\n"
"deprecated; use the new validate method instead\n")
def test_validate(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param._validator is None
assert param.model is None
# Run without validator
param.validate(mk.MagicMock())
# Run with validator but no Model
validator = mk.MagicMock()
param.validator(validator)
assert param._validator == validator
param.validate(mk.MagicMock())
assert validator.call_args_list == []
# Full validate
param._model = mk.MagicMock()
value = mk.MagicMock()
param.validate(value)
assert validator.call_args_list == [mk.call(param._model, value)]
def test_copy(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
copy_param = param.copy()
assert (param == copy_param).all()
assert id(param) != id(copy_param)
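    # Attaching a model re-wraps the parameter's setter/getter via
    # _create_value_wrapper; when the wrapper requires the model, the stored
    # value is pushed through the newly bound setter.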
def test_model(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.model is None
assert param._model is None
assert param._model_required is False
assert (param._value == [1, 2, 3, 4]).all()
setter = mk.MagicMock()
getter = mk.MagicMock()
param._setter = setter
param._getter = getter
# No Model Required
param._value = [5, 6, 7, 8]
model0 = mk.MagicMock()
setter0 = mk.MagicMock()
getter0 = mk.MagicMock()
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter0, getter0]) as mkCreate:
param.model = model0
assert param.model == model0 == param._model
assert param._setter == setter0
assert param._getter == getter0
assert mkCreate.call_args_list == [
mk.call(setter, model0),
mk.call(getter, model0)
]
assert param._value == [5, 6, 7, 8]
param._setter = setter
param._getter = getter
# Model required
param._model_required = True
model1 = mk.MagicMock()
setter1 = mk.MagicMock()
getter1 = mk.MagicMock()
setter1.return_value = [9, 10, 11, 12]
getter1.return_value = [9, 10, 11, 12]
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter1, getter1]) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1)
]
assert (param.value == [9, 10, 11, 12]).all()
param._setter = setter
param._getter = getter
param._default = None
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter1, getter1]) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1)
]
assert param._value is None
def test_raw_value(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
# Normal case
assert (param._raw_value == param.value).all()
# Bad setter
param._setter = True
param._internal_value = 4
assert param._raw_value == 4
def test__create_value_wrapper(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
# Bad ufunc
with pytest.raises(TypeError) as err:
param._create_value_wrapper(np.add, mk.MagicMock())
assert str(err.value) == ("A numpy.ufunc used for Parameter getter/setter "
"may only take one input argument")
# Good ufunc
assert param._create_value_wrapper(np.negative, mk.MagicMock()) == np.negative
# None
assert param._create_value_wrapper(None, mk.MagicMock()) is None
# wrapper with one argument
def wrapper1(a):
pass
assert param._create_value_wrapper(wrapper1, mk.MagicMock()) == wrapper1
        # wrapper with two arguments
def wrapper2(a, b):
pass
# model is None
assert param._model_required is False
assert param._create_value_wrapper(wrapper2, None) == wrapper2
assert param._model_required is True
# model is not None
param._model_required = False
model = mk.MagicMock()
with mk.patch.object(functools, 'partial', autospec=True) as mkPartial:
assert param._create_value_wrapper(wrapper2, model) == mkPartial.return_value
# wrapper with more than 2 arguments
def wrapper3(a, b, c):
pass
with pytest.raises(TypeError) as err:
param._create_value_wrapper(wrapper3, mk.MagicMock())
assert str(err.value) == ("Parameter getter/setter must be a function "
"of either one or two arguments")
def test_bool(self):
# single value is true
param = Parameter(name='test', default=1)
assert param.value == 1
assert np.all(param)
if param:
assert True
else:
assert False
# single value is false
param = Parameter(name='test', default=0)
assert param.value == 0
assert not np.all(param)
if param:
assert False
else:
assert True
# vector value all true
param = Parameter(name='test', default=[1, 2, 3, 4])
assert np.all(param.value == [1, 2, 3, 4])
assert np.all(param)
if param:
assert True
else:
assert False
# vector value at least one false
param = Parameter(name='test', default=[1, 2, 0, 3, 4])
assert np.all(param.value == [1, 2, 0, 3, 4])
assert not np.all(param)
if param:
assert False
else:
assert True
def test_param_repr_oneline(self):
# Single value no units
param = Parameter(name='test', default=1)
assert param_repr_oneline(param) == '1.'
# Vector value no units
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param_repr_oneline(param) == '[1., 2., 3., 4.]'
# Single value units
param = Parameter(name='test', default=1*u.m)
assert param_repr_oneline(param) == '1. m'
# Vector value units
param = Parameter(name='test', default=[1, 2, 3, 4] * u.m)
assert param_repr_oneline(param) == '[1., 2., 3., 4.] m'
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, .1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7],
n_models=2)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
        np.testing.assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[1., 10], [3.5, 5.2], [0.4, 0.7]]))
        assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to one single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
        np.testing.assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[11., 10], [3.5, 5.2], [0.4, 0.7]]))
        assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])
np.testing.assert_almost_equal(self.gmodel.mean.value, [9., 5.2])
class TestParameterInitialization:
"""
    This suite of tests checks most if not all cases of instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
with pytest.raises(InputParameterError):
# Not broadcastable
TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array([[10, 20], [30, 40], [50, 60]])
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]]])
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,
1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
with pytest.raises(InputParameterError):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(('p1', 'p2'), [
(1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5])])
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2, 2)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[10, 20], [30, 40]],
[[1, 2], [3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
t2 = TParModel([[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]]])
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
def test_two_model_mixed_dimension_array_parameters(self):
with pytest.raises(InputParameterError):
# Can't broadcast different array shapes
TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]], n_models=2)
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],
[[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
def test_two_model_2d_array_parameters(self):
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[[10, 20], [30, 40]],
[[50, 60], [70, 80]]],
[[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4, 5, 6, 7, 8])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]]])
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 30, 70, 40, 80,
50, 90, 1, 3, 2, 4, 3, 5])
assert t.coeff.shape == (2, 3, 2) # note change in api
assert t.e.shape == (3, 2) # note change in api
def test_wrong_number_of_params(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
with pytest.raises(InputParameterError):
TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
with pytest.raises(InputParameterError):
TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),
model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
    Tests that a model whose 3 parameters do not all mutually broadcast
    is rejected regardless of the order in which the parameters are given.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError):
TestModel(*args)
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(xc=-1, yc=3, p=np.pi)
for x, y in pars:
np.testing.assert_almost_equal(
model(x, y),
(x + 1)**2 + (y - np.pi * 3)**2)
|
3316fde3e54fa51da469cf7ea134e5b0911ac27322232d651b11140fb8fa1ffa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for physical functions."""
# pylint: disable=no-member, invalid-name
import numpy as np
import pytest
from astropy import cosmology
from astropy import units as u
from astropy.modeling.fitting import DogBoxLSQFitter, LevMarLSQFitter, LMLSQFitter, TRFLSQFitter
from astropy.modeling.physical_models import NFW, BlackBody
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
from astropy.utils.exceptions import AstropyUserWarning
__doctest_skip__ = ["*"]
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
# BlackBody tests
@pytest.mark.parametrize("temperature", (3000 * u.K, 2726.85 * u.deg_C))
def test_blackbody_evaluate(temperature):
b = BlackBody(temperature=temperature, scale=1.0)
assert_quantity_allclose(b(1.4 * u.micron), 486787299458.15656 * u.MJy / u.sr)
assert_quantity_allclose(b(214.13747 * u.THz), 486787299458.15656 * u.MJy / u.sr)
def test_blackbody_wiens_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.lambda_max, 9.890006672986939 * u.micron)
assert_quantity_allclose(b.nu_max, 17.22525080856469 * u.THz)
def test_blackbody_stefan_boltzmann_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.bolometric_flux, 133.02471751812573 * u.W / (u.m * u.m))
def test_blackbody_input_units():
SLAM = u.erg / (u.cm ** 2 * u.s * u.AA * u.sr)
SNU = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)
b_lam = BlackBody(3000*u.K, scale=1*SLAM)
    assert b_lam.input_units['x'] == u.AA
    b_nu = BlackBody(3000*u.K, scale=1*SNU)
    assert b_nu.input_units['x'] == u.Hz
def test_blackbody_return_units():
# return of evaluate has no units when temperature has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert not isinstance(b.evaluate(1.0 * u.micron, 1000.0, 1.0), u.Quantity)
# return has "standard" units when scale has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)
# return has scale units when scale has units
b = BlackBody(1000.0 * u.K, scale=1.0 * u.MJy / u.sr)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.MJy / u.sr
# scale has units but evaluate scale has no units
assert_quantity_allclose(b.evaluate(1.0 * u.micron, 1000.0 * u.K, 4.0),
89668184.86321202 * u.MJy / u.sr)
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize("fitter", fitters)
def test_blackbody_fit(fitter):
fitter = fitter()
    if isinstance(fitter, (TRFLSQFitter, DogBoxLSQFitter)):
rtol = 0.54
atol = 1e-15
else:
rtol = 1e-7
atol = 0
b = BlackBody(3000 * u.K, scale=5e-17 * u.Jy / u.sr)
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
b_fit = fitter(b, wav, fnu, maxiter=1000)
assert_quantity_allclose(b_fit.temperature, 2840.7438355865065 * u.K, rtol=rtol)
assert_quantity_allclose(b_fit.scale, 5.803783292762381e-17, atol=atol)
def test_blackbody_overflow():
"""Test Planck function with overflow."""
photlam = u.photon / (u.cm ** 2 * u.s * u.AA)
wave = [0.0, 1000.0, 100000.0, 1e55] # Angstrom
temp = 10000.0 # Kelvin
bb = BlackBody(temperature=temp * u.K, scale=1.0)
with pytest.warns(
AstropyUserWarning,
match=r'Input contains invalid wavelength/frequency value\(s\)'):
with np.errstate(all="ignore"):
bb_lam = bb(wave) * u.sr
flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr
# First element is NaN, last element is very small, others normal
assert np.isnan(flux[0])
with np.errstate(all="ignore"):
assert np.log10(flux[-1].value) < -134
np.testing.assert_allclose(
flux.value[1:-1], [0.00046368, 0.04636773], rtol=1e-3
) # 0.1% accuracy in PHOTLAM/sr
with np.errstate(all="ignore"):
flux = bb(1.0 * u.AA)
assert flux.value == 0
def test_blackbody_exceptions_and_warnings():
"""Test exceptions."""
# Negative temperature
with pytest.raises(
ValueError,
match="Temperature should be positive: \\[-100.\\] K"):
bb = BlackBody(-100 * u.K)
bb(1.0 * u.micron)
bb = BlackBody(5000 * u.K)
# Zero wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match='invalid') as w:
bb(0 * u.AA)
assert len(w) == 3 # 2 of these are RuntimeWarning from zero divide
# Negative wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match='invalid') as w:
bb(-1.0 * u.AA)
assert len(w) == 1
# Test that a non surface brightness convertible scale unit raises an error
with pytest.raises(
ValueError,
match="scale units not dimensionless or in surface brightness: Jy"):
bb = BlackBody(5000 * u.K, scale=1.0 * u.Jy)
def test_blackbody_array_temperature():
"""Regression test to make sure that the temperature can be an array."""
multibb = BlackBody([100, 200, 300] * u.K)
flux = multibb(1.2 * u.mm)
np.testing.assert_allclose(
flux.value, [1.804908e-12, 3.721328e-12, 5.638513e-12], rtol=1e-5
)
flux = multibb([2, 4, 6] * u.mm)
np.testing.assert_allclose(
flux.value, [6.657915e-13, 3.420677e-13, 2.291897e-13], rtol=1e-5
)
multibb = BlackBody(np.ones(4) * u.K)
flux = multibb(np.ones((3, 4)) * u.mm)
assert flux.shape == (3, 4)
def test_blackbody_dimensionless():
"""Test support for dimensionless (but not unscaled) units for scale"""
T = 3000 * u.K
r = 1e14 * u.cm
DL = 100 * u.Mpc
scale = np.pi * (r / DL)**2
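    # pi * (r / DL)**2 is the solid-angle dilution factor of a sphere of
    # radius r at distance DL: dimensionless, but not unity.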
bb1 = BlackBody(temperature=T, scale=scale)
# even though we passed scale with units, we should be able to evaluate with unitless
bb1.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
bb2.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
# bolometric flux for both cases should be equivalent
    assert bb1.bolometric_flux == bb2.bolometric_flux
@pytest.mark.skipif("not HAS_SCIPY")
def test_blackbody_dimensionless_fit():
T = 3000 * u.K
r = 1e14 * u.cm
DL = 100 * u.Mpc
scale = np.pi * (r / DL)**2
bb1 = BlackBody(temperature=T, scale=scale)
bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
fitter = LevMarLSQFitter()
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
bb1_fit = fitter(bb1, wav, fnu, maxiter=1000)
bb2_fit = fitter(bb2, wav, fnu, maxiter=1000)
    assert bb1_fit.temperature == bb2_fit.temperature
@pytest.mark.parametrize("mass", (2.0000000000000E15 * u.M_sun, 3.976819741e+45 * u.kg))
def test_NFW_evaluate(mass):
"""Evaluation, density, and radii validation of NFW model."""
# Test parameters
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
# Parsec tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3.0 * u.Mpc), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
assert_quantity_allclose(n200c.rho_scale, (7800150779863018.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(n200c.r_s, (0.24684627641195428 * u.Mpc))
assert_quantity_allclose(n200c.r_virial, (2.0981933495016114 * u.Mpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3.0 * u.Mpc), (3.626093406e+12 * (u.solMass / u.Mpc**3),
7.210159921e+42 * (u.kg / u.Mpc**3)))
assert_quantity_allclose(n200m.rho_scale, (5118547639858115.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(n200m.r_s, (0.2840612517326848 * u.Mpc))
assert_quantity_allclose(n200m.r_virial, (2.414520639727821 * u.Mpc))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3.0 * u.Mpc), (3.646475546e+12 * (u.solMass / u.Mpc**3),
7.250687967e+42 * (u.kg / u.Mpc**3)))
assert_quantity_allclose(nvir.rho_scale, (5649367524651067.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(nvir.r_s, (0.2748701862303786 * u.Mpc))
assert_quantity_allclose(nvir.r_virial, (2.3363965829582183 * u.Mpc))
# kpc tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3141 * u.kpc), (3254.373619264334 * (u.solMass / u.kpc ** 3),
6.471028627484543e+33 * (u.kg / u.kpc ** 3)))
assert_quantity_allclose(n200c.rho_scale, (7800150.779863021 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(n200c.r_s, (246.84627641195425 * u.kpc))
assert_quantity_allclose(n200c.r_virial, (2098.193349501611 * u.kpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3141 * u.kpc), (3184.0370866188623 * (u.solMass / u.kpc**3),
6.33117077170161e+33 * (u.kg / u.kpc**3)))
assert_quantity_allclose(n200m.rho_scale, (5118547.639858116 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(n200m.r_s, (284.0612517326848 * u.kpc))
assert_quantity_allclose(n200m.r_virial, (2414.5206397278207 * u.kpc))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3141 * u.kpc), (3201.1946851294997 * (u.solMass / u.kpc**3),
6.365287109937637e+33 * (u.kg / u.kpc**3)))
assert_quantity_allclose(nvir.rho_scale, (5649367.5246510655 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(nvir.r_s, (274.87018623037864 * u.kpc))
assert_quantity_allclose(nvir.r_virial, (2336.3965829582185 * u.kpc))
# Meter tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(4.2e+23 * u.m), (1.527649658673012e-57 * (u.solMass / u.m ** 3),
3.0375936602739256e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(n200c.rho_scale, (2.654919529637763e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(n200c.r_s, (7.616880211930209e+21 * u.m))
assert_quantity_allclose(n200c.r_virial, (6.474348180140678e+22 * u.m))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(4.2e+23 * u.m), (1.5194778058079436e-57 * (u.solMass / u.m ** 3),
3.0213446673751314e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(n200m.rho_scale, (1.742188385322371e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(n200m.r_s, (8.76521436235054e+21 * u.m))
assert_quantity_allclose(n200m.r_virial, (7.450432207997959e+22 * u.m))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(4.2e+23 * u.m), (1.5214899184117633e-57 * (u.solMass / u.m ** 3),
3.0253455719375224e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(nvir.rho_scale, (1.922862338766335e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(nvir.r_s, (8.481607714647913e+21 * u.m))
assert_quantity_allclose(nvir.r_virial, (7.209366557450727e+22 * u.m))
# Verify string input of overdensity type
# 200c Overdensity
massfactor = "200c"
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3.0 * u.Mpc), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
# 200m Overdensity
massfactor = "200m"
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3.0 * u.Mpc), (3.626093406e+12 * (u.solMass / u.Mpc**3),
7.210159921e+42 * (u.kg / u.Mpc**3)))
# Virial mass
massfactor = "virial"
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3.0 * u.Mpc), (3.646475546e+12 * (u.solMass / u.Mpc**3),
7.250687967e+42 * (u.kg / u.Mpc**3)))
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('fitter', fitters)
def test_NFW_fit(fitter):
"""Test linear fitting of NFW model."""
fitter = fitter()
if isinstance(fitter, DogBoxLSQFitter):
pytest.xfail("dogbox method is poor fitting method for NFW model")
# Fixed parameters
redshift = 0.63
cosmo = cosmology.Planck15
# Radial set
r = np.array([1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04]) * u.kpc
# 200c Overdensity
massfactor = ("critical", 200)
density_r = np.array([1.77842761e+08, 9.75233623e+06, 2.93789626e+06, 1.90107238e+06,
1.30776878e+06, 7.01004140e+05, 4.20678479e+05, 1.57421880e+05,
7.54669701e+04, 2.56319769e+04, 6.21976562e+03, 3.96522424e+02,
7.39336808e+01]) * (u.solMass / u.kpc ** 3)
n200c = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
n200c.redshift.fixed = True
n_fit = fitter(n200c, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# 200m Overdensity
massfactor = ("mean", 200)
density_r = np.array([1.35677282e+08, 7.95392979e+06, 2.50352599e+06, 1.64535870e+06,
1.14642248e+06, 6.26805453e+05, 3.81691731e+05, 1.46294819e+05,
7.11559560e+04, 2.45737796e+04, 6.05459585e+03, 3.92183991e+02,
7.34674416e+01]) * (u.solMass / u.kpc ** 3)
n200m = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
n200m.redshift.fixed = True
n_fit = fitter(n200m, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# Virial mass
massfactor = ("virial", 200)
density_r = np.array([1.44573515e+08, 8.34873998e+06, 2.60137484e+06, 1.70348738e+06,
1.18337370e+06, 6.43994654e+05, 3.90800249e+05, 1.48930537e+05,
7.21856397e+04, 2.48289464e+04, 6.09477095e+03, 3.93248818e+02,
7.35821787e+01]) * (u.solMass / u.kpc ** 3)
nvir = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
nvir.redshift.fixed = True
n_fit = fitter(nvir, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
def test_NFW_circular_velocity():
"""Test circular velocity and radial validation of NFW model."""
# Test parameters
mass = 2.0000000000000E15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
r_r = np.array([0.01, 0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.75, 1.0, 1.5, 2.5, 6.5, 11.5]) * u.Mpc
# 200c Overdensity tests
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_200c = np.array([702.45487454, 1812.4138346, 2150.50929296, 2231.5802568, 2283.96950242,
2338.45989696, 2355.78876772, 2332.41766543, 2276.89433811,
2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541]) * (u.km / u.s)
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
assert_quantity_allclose(n200c.r_max, (0.5338248204429641 * u.Mpc))
assert_quantity_allclose(n200c.v_max, (2356.7204380904027 * (u.km / u.s)))
# 200m Overdensity tests
massfactor = ("mean", 200)
mass = 1.0e14 * u.M_sun
concentration = 12.3
redshift = 1.5
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_200m = np.array([670.18236647, 1088.9843324, 1046.82334367, 1016.88890732, 987.97273478,
936.00207134, 891.80115232, 806.63307977, 744.91002191, 659.33401039,
557.82823549, 395.9735786, 318.29863006]) * (u.km / u.s)
assert_quantity_allclose(n200m.circular_velocity(r_r), circ_v_200m)
assert_quantity_allclose(n200m.r_max, (0.10196917920081808 * u.Mpc))
assert_quantity_allclose(n200m.v_max, (1089.0224395818727 * (u.km / u.s)))
# Virial Overdensity tests
massfactor = ("virial")
mass = 1.2e+45 * u.kg
concentration = 2.4
redshift = 0.34
r_r = np.array([3.08567758e+20, 3.08567758e+21, 6.17135516e+21, 7.71419395e+21,
9.25703274e+21, 1.23427103e+22, 1.54283879e+22, 2.31425819e+22,
3.08567758e+22, 4.62851637e+22, 7.71419395e+22, 2.00569043e+23,
3.54852922e+23]) * u.m
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_vir = np.array([205.87461783, 604.65091823, 793.9190629, 857.52516521, 908.90280843,
986.53582718, 1041.69089845, 1124.19719446, 1164.58270747, 1191.33193561,
1174.02934755, 1023.69360527, 895.52206321]) * (u.km / u.s)
assert_quantity_allclose(nvir.circular_velocity(r_r), circ_v_vir)
assert_quantity_allclose(nvir.r_max, (1.6484542328623448 * u.Mpc))
assert_quantity_allclose(nvir.v_max, (1192.3130989914962 * (u.km / u.s)))
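# A minimal illustrative sketch (hypothetical helper, not part of the original
# suite): for any NFW halo the radius of maximum circular velocity is a fixed
# multiple of the scale radius, r_max ~= 2.16258 * r_s, independent of mass,
# which is consistent with the r_s and r_max values asserted above.
def _nfw_rmax_sketch(nfw):
    return 2.16258 * nfw.r_s  # ~= nfw.r_max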
def test_NFW_exceptions_and_warnings_and_misc():
"""Test NFW exceptions."""
# Arbitrary Test parameters
mass = 2.0000000000000E15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
massfactor = ("critical", 200)
r_r = np.array([1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04]) * u.kpc
# Massfactor exception tests
with pytest.raises(ValueError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=("not", "virial"))
assert exc.value.args[0] == "Massfactor 'not' not one of 'critical', 'mean', or 'virial'"
with pytest.raises(ValueError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor="not virial")
assert exc.value.args[0] == ("Massfactor not virial string not of the form "
"'#m', '#c', or 'virial'")
with pytest.raises(TypeError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=200)
assert exc.value.args[0] == "Massfactor 200 not a tuple or string"
# Verify unitless mass
# Density test
n200c = NFW(mass=mass.value, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3000.0), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
# Circular velocity test with unitless mass
circ_v_200c = np.array([702.45487454, 1812.4138346, 2150.50929296, 2231.5802568, 2283.96950242,
2338.45989696, 2355.78876772, 2332.41766543, 2276.89433811,
2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541]) * (u.km / u.s)
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
    # Circular velocity test with unitless radius input
assert_quantity_allclose(n200c.circular_velocity(r_r.value), circ_v_200c)
# Test Default Cosmology
ncos = NFW(mass=mass, concentration=concentration, redshift=redshift)
assert_quantity_allclose(ncos.A_NFW(concentration), 1.356554956501232)
|
1c83014eaebf1910fb068fa45696e360bcd91fdbbd872c7277e7dba1ce89b157 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import unittest.mock as mk
from math import cos, sin
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.modeling import models, rotations
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6]),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(1e-5, 1e-4), (40, -20.56), (21.5, 45.9),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
def test_Rotation2D_quantity():
model = models.Rotation2D(angle=90*u.deg)
x, y = model(1*u.deg, 0*u.arcsec)
assert_quantity_allclose([x, y], [0, 1]*u.deg, atol=1e-10*u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_Rotation2D_errors():
model = models.Rotation2D(angle=90*u.deg)
# Bad evaluation input shapes
x = np.array([1, 2])
y = np.array([1, 2, 3])
message = "Expected input arrays to have the same shape"
with pytest.raises(ValueError) as err:
model.evaluate(x, y, model.angle)
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(y, x, model.angle)
assert str(err.value) == message
# Bad evaluation units
x = np.array([1, 2])
y = np.array([1, 2])
message = "x and y must have compatible units"
with pytest.raises(u.UnitsError) as err:
model.evaluate(x * u.m, y, model.angle)
assert str(err.value) == message
def test_euler_angle_rotations():
x = (0, 0)
y = (90, 0)
z = (0, 90)
negx = (180, 0)
negy = (-90, 0)
# rotate y into minus z
model = models.EulerAngleRotation(0, 90, 0, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
# rotate z into minus x
model = models.EulerAngleRotation(0, 90, 0, 'zyz')
assert_allclose(model(*z), negx, atol=10**-12)
# rotate x into minus y
model = models.EulerAngleRotation(0, 90, 0, 'yzy')
assert_allclose(model(*x), negy, atol=10**-12)
euler_axes_order = ['zxz', 'zyz', 'yzy', 'yxy', 'xyx', 'xzx']
@pytest.mark.parametrize(('axes_order'), euler_axes_order)
def test_euler_angles(axes_order):
"""
Tests against all Euler sequences.
    The rotation matrix definitions come from Wikipedia.
"""
phi = np.deg2rad(23.4)
theta = np.deg2rad(12.2)
psi = np.deg2rad(34)
c1 = cos(phi)
c2 = cos(theta)
c3 = cos(psi)
s1 = sin(phi)
s2 = sin(theta)
s3 = sin(psi)
matrices = {'zxz': np.array([[(c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1), (s1*s2)],
[(c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3), (-c1*s2)],
[(s2*s3), (c3*s2), (c2)]]),
'zyz': np.array([[(c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3), (c1*s2)],
[(c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3), (s1*s2)],
[(-c3*s2), (s2*s3), (c2)]]),
'yzy': np.array([[(c1*c2*c3 - s1*s3), (-c1*s2), (c3*s1+c1*c2*s3)],
[(c3*s2), (c2), (s2*s3)],
[(-c1*s3 - c2*c3*s1), (s1*s2), (c1*c3-c2*s1*s3)]]),
'yxy': np.array([[(c1*c3 - c2*s1*s3), (s1*s2), (c1*s3+c2*c3*s1)],
[(s2*s3), (c2), (-c3*s2)],
[(-c3*s1 - c1*c2*s3), (c1*s2), (c1*c2*c3 - s1*s3)]]),
'xyx': np.array([[(c2), (s2*s3), (c3*s2)],
[(s1*s2), (c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1)],
[(-c1*s2), (c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3)]]),
'xzx': np.array([[(c2), (-c3*s2), (s2*s3)],
[(c1*s2), (c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3)],
[(s1*s2), (c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3)]])
}
mat = rotations._create_matrix([phi, theta, psi], axes_order)
    assert_allclose(mat.T, matrices[axes_order])
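# A minimal illustrative sketch (hypothetical helper, not part of the original
# suite): the 'zxz' matrix tabulated in test_euler_angles equals the product of
# elementary rotations R_z(phi) @ R_x(theta) @ R_z(psi), which is how the
# Wikipedia tables are derived.
def _elementary_zxz(phi, theta, psi):
    def rz(a):
        # Rotation about the z axis by angle a (radians).
        return np.array([[np.cos(a), -np.sin(a), 0.],
                         [np.sin(a), np.cos(a), 0.],
                         [0., 0., 1.]])
    def rx(a):
        # Rotation about the x axis by angle a (radians).
        return np.array([[1., 0., 0.],
                         [0., np.cos(a), -np.sin(a)],
                         [0., np.sin(a), np.cos(a)]])
    return rz(phi) @ rx(theta) @ rz(psi)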
def test_rotation_3d():
"""
A sanity test - when V2_REF = 0 and V3_REF = 0,
for V2, V3 close to the origin
    ROLL_REF should be approximately PA_V3.
(Test taken from JWST SIAF report.)
"""
def _roll_angle_from_matrix(matrix, v2, v3):
X = (
-(matrix[2, 0] * np.cos(v2) + matrix[2, 1] * np.sin(v2)) *
np.sin(v3) + matrix[2, 2] * np.cos(v3)
)
Y = (
(matrix[0, 0] * matrix[1, 2] - matrix[1, 0] * matrix[0, 2]) * np.cos(v2) +
(matrix[0, 1] * matrix[1, 2] - matrix[1, 1] * matrix[0, 2]) * np.sin(v2)
)
new_roll = np.rad2deg(np.arctan2(Y, X))
if new_roll < 0:
new_roll += 360
return new_roll
# reference points on sky and in a coordinate frame associated
# with the telescope
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = 0
v3_ref = 0
pa_v3 = 37 # in deg
    v2 = np.deg2rad(2.7e-6)  # 2.7e-6 deg (~0.01 arcsec), converted to radians
    v3 = np.deg2rad(2.7e-6)  # 2.7e-6 deg (~0.01 arcsec), converted to radians
angles = [v2_ref, -v3_ref, pa_v3, dec_ref, -ra_ref]
axes = "zyxyz"
M = rotations._create_matrix(np.deg2rad(angles) * u.deg, axes)
roll_angle = _roll_angle_from_matrix(M, v2, v3)
assert_allclose(roll_angle, pa_v3, atol=1e-3)
def test_spherical_rotation():
"""
Test taken from JWST INS report - converts
JWST telescope (V2, V3) coordinates to RA, DEC.
"""
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = -503.654472 / 3600 # in deg
v3_ref = -318.742464 / 3600 # in deg
r0 = 37 # in deg
v2 = 210 # in deg
v3 = -75 # in deg
expected_ra_dec = (107.12810484789563, -35.97940247128502) # in deg
angles = np.array([v2_ref, -v3_ref, r0, dec_ref, -ra_ref])
axes = "zyxyz"
v2s = rotations.RotationSequence3D(angles, axes_order=axes)
x, y, z = rotations.spherical2cartesian(v2, v3)
x1, y1, z1 = v2s(x, y, z)
radec = rotations.cartesian2spherical(x1, y1, z1)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
v2s = rotations.SphericalRotationSequence(angles, axes_order=axes)
radec = v2s(v2, v3)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
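# A minimal illustrative sketch (hypothetical helper, not part of the original
# suite): spherical2cartesian and cartesian2spherical are inverses on the unit
# sphere (for longitudes in (-180, 180], since cartesian2spherical uses
# arctan2), which is what lets SphericalRotationSequence wrap the Cartesian
# rotation sequence used above.
def _sphere_roundtrip_sketch(lon=42.0, lat=-13.0):
    x, y, z = rotations.spherical2cartesian(lon, lat)
    return rotations.cartesian2spherical(x, y, z)  # ~= (lon, lat)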
def test_RotationSequence3D_errors():
# Bad axes_order labels
with pytest.raises(ValueError, match=r"Unrecognized axis label .* should be one of .*"):
rotations.RotationSequence3D(mk.MagicMock(), axes_order="abc")
# Bad number of angles
with pytest.raises(ValueError) as err:
rotations.RotationSequence3D([1, 2, 3, 4], axes_order="zyx")
assert str(err.value) == "The number of angles 4 should match the number of axes 3."
# Bad evaluation input shapes
model = rotations.RotationSequence3D([1, 2, 3], axes_order="zyx")
message = "Expected input arrays to have the same shape"
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2, 3]),
np.array([1, 2]),
np.array([1, 2]),
[1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2]),
np.array([1, 2, 3]),
np.array([1, 2]),
[1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2]),
np.array([1, 2]),
np.array([1, 2, 3]),
[1, 2, 3])
assert str(err.value) == message
def test_RotationSequence3D_inverse():
model = rotations.RotationSequence3D([1, 2, 3], axes_order="zyx")
assert_allclose(model.inverse.angles.value, [-3, -2, -1])
assert model.inverse.axes_order == "xyz"
def test_EulerAngleRotation_errors():
# Bad length of axes_order
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(mk.MagicMock(), mk.MagicMock(), mk.MagicMock(),
axes_order="xyzx")
assert str(err.value) == "Expected axes_order to be a character sequence of length 3, got xyzx"
# Bad axes_order labels
with pytest.raises(ValueError, match=r"Unrecognized axis label .* should be one of .*"):
rotations.EulerAngleRotation(mk.MagicMock(), mk.MagicMock(), mk.MagicMock(),
axes_order="abc")
# Bad units
message = "All parameters should be of the same type - float or Quantity."
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1 * u.m, 2, 3,
axes_order="xyz")
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1, 2 * u.m, 3,
axes_order="xyz")
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1, 2, 3 * u.m,
axes_order="xyz")
assert str(err.value) == message
def test_EulerAngleRotation_inverse():
model = rotations.EulerAngleRotation(1, 2, 3, "xyz")
assert_allclose(model.inverse.phi, -3)
assert_allclose(model.inverse.theta, -2)
assert_allclose(model.inverse.psi, -1)
assert model.inverse.axes_order == "zyx"
def test__SkyRotation_errors():
# Bad units
message = "All parameters should be of the same type - float or Quantity."
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1 * u.m, 2, 3)
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1, 2 * u.m, 3)
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1, 2, 3 * u.m)
assert str(err.value) == message
def test__SkyRotation__evaluate():
model = rotations._SkyRotation(1, 2, 3)
phi = mk.MagicMock()
theta = mk.MagicMock()
lon = mk.MagicMock()
lat = mk.MagicMock()
lon_pole = mk.MagicMock()
alpha = 5
delta = mk.MagicMock()
with mk.patch.object(rotations._EulerRotation, 'evaluate',
autospec=True, return_value=(alpha, delta)) as mkEval:
assert (365, delta) == model._evaluate(phi, theta, lon, lat, lon_pole)
assert mkEval.call_args_list == [mk.call(model, phi, theta, lon, lat, lon_pole, 'zxz')]
|
7dd627dbd7e1ab63bcdb2fdcce850d376687d51719a238473a91997815a8f7f5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import platform
import types
import warnings
import numpy as np
import pytest
from numpy.random import default_rng
from numpy.testing import assert_allclose
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
from astropy.utils.exceptions import AstropyUserWarning
fitters = [
fitting.LevMarLSQFitter,
fitting.TRFLSQFitter,
    fitting.LMLSQFitter,
fitting.DogBoxLSQFitter
]
class TestNonLinearConstraints:
def setup_class(self):
self.g1 = models.Gaussian1D(10, 14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, 13, stddev=.4)
self.x = np.arange(10, 20, .1)
self.y1 = self.g1(self.x)
self.y2 = self.g2(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(100)
self.ny1 = self.y1 + 2 * self.n
self.ny2 = self.y2 + 2 * self.n
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fixed_par(self, fitter):
fitter = fitter()
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3,
fixed={'amplitude': True})
model = fitter(g1, self.x, self.ny1)
assert model.amplitude.value == 10
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize("fitter", fitters)
def test_tied_par(self, fitter):
fitter = fitter()
def tied(model):
mean = 50 * model.stddev
return mean
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3, tied={'mean': tied})
model = fitter(g1, self.x, self.ny1)
assert_allclose(model.mean.value, 50 * model.stddev,
rtol=10 ** (-5))
@pytest.mark.skipif('not HAS_SCIPY')
def test_joint_fitter(self):
from scipy import optimize
g1 = models.Gaussian1D(10, 14.9, stddev=.3)
g2 = models.Gaussian1D(10, 13, stddev=.4)
jf = fitting.JointFitter([g1, g2], {g1: ['amplitude'],
g2: ['amplitude']}, [9.8])
x = np.arange(10, 20, .1)
y1 = g1(x)
y2 = g2(x)
n = np.random.randn(100)
ny1 = y1 + 2 * n
ny2 = y2 + 2 * n
jf(x, ny1, x, ny2)
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def compmodel(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errf(p, x1, y1, x2, y2):
return np.ravel(
np.r_[compmodel(p[0], p[1:3], x1) - y1,
compmodel(p[0], p[3:], x2) - y2])
fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2))
assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5))
assert_allclose(g1.amplitude.value, g2.amplitude.value)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize("fitter", fitters)
def test_no_constraints(self, fitter):
from scipy import optimize
fitter = fitter()
g1 = models.Gaussian1D(9.9, 14.5, stddev=.3)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errf(p, x, y):
return func(p, x) - y
p0 = [9.9, 14.5, 0.3]
y = g1(self.x)
n = np.random.randn(100)
ny = y + n
fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny))
model = fitter(g1, self.x, ny)
assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3))
@pytest.mark.skipif('not HAS_SCIPY')
class TestBounds:
def setup_class(self):
A = -2.0
B = 0.5
self.x = np.linspace(-1.0, 1.0, 100)
self.y = A * self.x + B + np.random.normal(scale=0.1, size=100)
data = np.array([505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0,
426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0,
479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0,
610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0,
1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0,
1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0,
3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0,
1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0,
733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0,
504.0, 566.0, 789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0,
435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0,
443.0, 416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0,
428.0])
self.data = data.reshape(11, 11)
@pytest.mark.parametrize("fitter", fitters)
def test_bounds_lsq(self, fitter):
fitter = fitter()
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_slsqp(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.SLSQPLSQFitter()
with pytest.warns(AstropyUserWarning, match='consider using linear fitting methods'):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
@pytest.mark.parametrize("fitter", fitters)
def test_bounds_gauss2d_lsq(self, fitter):
fitter = fitter()
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
        if isinstance(fitter, (fitting.LevMarLSQFitter, fitting.DogBoxLSQFitter)):
with pytest.warns(AstropyUserWarning,
match='The fit may be unsuccessful'):
model = fitter(gauss, X, Y, self.data)
else:
model = fitter(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
def test_bounds_gauss2d_slsqp(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.SLSQPLSQFitter()
# Warning does not appear in all the CI jobs.
# TODO: Rewrite the test for more consistent warning behavior.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=r'.*The fit may be unsuccessful.*',
category=AstropyUserWarning)
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
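# A minimal illustrative sketch (hypothetical helper, not part of the original
# suite): bounds can equivalently be set through the Parameter.min and
# Parameter.max attributes rather than the bounds dict used in the tests above.
def _bounds_via_attributes_sketch():
    gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
                              x_stddev=4., y_stddev=4.)
    gauss.x_stddev.min = 1.
    gauss.x_stddev.max = 4.
    return gauss.bounds['x_stddev']  # (1.0, 4.0)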
class TestLinearConstraints:
def setup_class(self):
self.p1 = models.Polynomial1D(4)
self.p1.c0 = 0
self.p1.c1 = 0
self.p1.window = [0., 9.]
self.x = np.arange(10)
self.y = self.p1(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(10)
self.ny = self.y + self.n
def test(self):
self.p1.c0.fixed = True
self.p1.c1.fixed = True
pfit = fitting.LinearLSQFitter()
model = pfit(self.p1, self.x, self.y)
assert_allclose(self.y, model(self.x))
# Test constraints as parameter properties
def test_set_fixed_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.mean.fixed = True
assert gauss.fixed == {'amplitude': False, 'mean': True, 'stddev': False}
def test_set_fixed_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
assert gauss.mean.fixed is True
def test_set_tied_1():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.amplitude.tied = tie_amplitude
assert gauss.amplitude.tied is not False
assert isinstance(gauss.tied['amplitude'], types.FunctionType)
def test_set_tied_2():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
assert gauss.amplitude.tied
def test_unset_fixed():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
gauss.mean.fixed = False
assert gauss.fixed == {'amplitude': False, 'mean': False, 'stddev': False}
def test_unset_tied():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
gauss.amplitude.tied = False
assert gauss.tied == {'amplitude': False, 'mean': False, 'stddev': False}
def test_set_bounds_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, None)})
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_set_bounds_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.stddev.min = 0.
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_unset_bounds():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, 2)})
gauss.stddev.min = None
gauss.stddev.max = None
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (None, None)}
def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {'a': (None, None), 'b': (0, None)}
assert m.fixed == {'a': False, 'b': True}
# Make a model instance that overrides the default constraints and values
m = MyModel(3, 4, bounds={'a': (1, None), 'b': (2, None)},
fixed={'a': True, 'b': False})
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {'a': (1, None), 'b': (2, None)}
assert m.fixed == {'a': True, 'b': False}
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:divide by zero encountered.*')
@pytest.mark.parametrize('fitter', fitters)
def test_fit_with_fixed_and_bound_constraints(fitter):
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
    # DogBoxLSQFitter causes failures on s390x, armel, and possibly other
    # platforms (not x86_64 or arm64)
    if fitter == fitting.DogBoxLSQFitter and (platform.machine() not in ('x86_64', 'arm64')):
        pytest.xfail("DogBoxLSQFitter can be unstable on non-standard platforms, "
                     "leading to random test failures")
fitter = fitter()
m = models.Gaussian1D(amplitude=3, mean=4, stddev=1,
bounds={'mean': (4, 5)},
fixed={'amplitude': True})
x = np.linspace(0, 10, 10)
y = np.exp(-x ** 2 / 2)
fitted_1 = fitter(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
_ = fitter(m, x, y)
# It doesn't matter anymore what the amplitude ends up as so long as the
# bounds constraint was still obeyed
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fit_with_bound_constraints_estimate_jacobian(fitter):
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
fitter = fitter()
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
fitted_1 = fitter(m, x, y)
# This fit should be trivial so even without constraints on the bounds it
# should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
_ = fitter(m2, x, y)
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
    # Check that the estimated Jacobian was computed (it doesn't matter what
    # the values are so long as they're not all zero).
if fitter == fitting.LevMarLSQFitter:
assert np.any(fitter.fit_info['fjac'] != 0)
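# A minimal illustrative sketch (hypothetical class, not part of the original
# suite): defining fit_deriv on a model gives the fitter an analytic Jacobian
# instead of the numerical estimate exercised by the test above.
class _LineWithDeriv(Fittable1DModel):
    a = Parameter(default=1)
    b = Parameter(default=2)
    @staticmethod
    def evaluate(x, a, b):
        return a * x + b
    @staticmethod
    def fit_deriv(x, a, b):
        # Partial derivatives of evaluate with respect to a and b.
        return [x, np.ones_like(x)]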
# https://github.com/astropy/astropy/issues/6014
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_gaussian2d_positive_stddev(fitter):
# This is 2D Gaussian with noise to be fitted, as provided by @ysBach
fitter = fitter()
test = [
[-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9,
-30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29],
[-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14,
139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03],
[91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26,
7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41],
[33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94,
336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55],
[82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27,
242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74],
[113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8,
547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35],
[106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9,
781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36],
[183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78,
731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24],
[137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49,
814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19],
[35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0,
491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05],
[190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43,
188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31],
[-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38,
220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96],
[130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36,
105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9],
[-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82,
-33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1],
[109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22,
42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51],
[10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03,
23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79],
[46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08,
285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]]
g_init = models.Gaussian2D(x_mean=8, y_mean=8)
    if isinstance(fitter, (fitting.TRFLSQFitter, fitting.DogBoxLSQFitter)):
        pytest.xfail("TRFLSQFitter and DogBoxLSQFitter appear to be broken for this test.")
y, x = np.mgrid[:17, :17]
g_fit = fitter(g_init, x, y, test)
# Compare with @ysBach original result:
# - x_stddev was negative, so its abs value is used for comparison here.
# - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored.
assert_allclose([g_fit.amplitude.value, g_fit.y_stddev.value],
[984.7694929790363, 3.1840618351417307], rtol=1.5e-6)
assert_allclose(g_fit.x_mean.value, 7.198391516587464)
assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.parametrize('fitter', fitters)
def test_2d_model(fitter):
"""Issue #6403"""
from astropy.utils import NumpyRNGContext
fitter = fitter()
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
def test_set_prior_posterior():
model = models.Polynomial1D(1)
model.c0.prior = models.Gaussian1D(2.3, 2, .1)
assert model.c0.prior(2) == 2.3
model.c0.posterior = models.Linear1D(1, .2)
assert model.c0.posterior(1) == 1.2
def test_set_constraints():
g = models.Gaussian1D()
p = models.Polynomial1D(1)
# Set bounds before model combination
g.stddev.bounds = (0, 3)
m = g + p
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (0.0, 3.0),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set bounds on the compound model
m.stddev_0.bounds = (1, 3)
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (1.0, 3.0),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set the bounds of a Parameter directly in the bounds dict
m.bounds['stddev_0'] = (4, 5)
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (4, 5),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set the bounds of a Parameter on the child model bounds dict
g.bounds['stddev'] = (1, 5)
m = g + p
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (1, 5),
'c0_1': (None, None),
'c1_1': (None, None)}
|
34822c47bb3366a6dbbd686087d5cff5d2a0d7e5d9f98093e2f0088f5542f092 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.modeling.fitting import DogBoxLSQFitter, LevMarLSQFitter, LMLSQFitter, TRFLSQFitter
from astropy.modeling.models import Gaussian1D, Identity, Mapping, Rotation2D, Shift, UnitsMapping
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
def test_swap_axes():
x = np.zeros((2, 3))
y = np.ones((2, 3))
mapping = Mapping((1, 0))
assert mapping(1, 2) == (2.0, 1.0)
assert mapping.inverse(2, 1) == (1, 2)
assert_array_equal(mapping(x, y), (y, x))
assert_array_equal(mapping.inverse(y, x), (x, y))
def test_duplicate_axes():
mapping = Mapping((0, 1, 0, 1))
assert mapping(1, 2) == (1.0, 2., 1., 2)
assert mapping.inverse(1, 2, 1, 2) == (1, 2)
assert mapping.inverse.n_inputs == 4
assert mapping.inverse.n_outputs == 2
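# A minimal illustrative sketch (hypothetical helper, not part of the original
# suite): duplicating an axis with Mapping is the usual way to feed a single
# input to two parallel models in a compound expression.
def _mapping_fanout_sketch():
    fanout = Mapping((0, 0)) | (Shift(1) & Shift(2))
    return fanout(1)  # (2.0, 3.0)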
def test_drop_axes_1():
mapping = Mapping((0,), n_inputs=2)
assert mapping(1, 2) == (1.)
def test_drop_axes_2():
mapping = Mapping((1, ))
assert mapping(1, 2) == (2.)
with pytest.raises(NotImplementedError):
mapping.inverse
def test_drop_axes_3():
mapping = Mapping((1,), n_inputs=2)
assert mapping.n_inputs == 2
rotation = Rotation2D(60)
model = rotation | mapping
assert_allclose(model(1, 2), 1.86602540378)
@pytest.mark.parametrize('name', [None, 'test_name'])
def test_bad_inputs(name):
mapping = Mapping((1, 0), name=name)
if name is None:
name = "Mapping"
x = [np.ones((2, 3))*idx for idx in range(5)]
for idx in range(1, 6):
if idx == 2:
continue
with pytest.raises(TypeError) as err:
mapping.evaluate(*x[:idx])
assert str(err.value) == f"{name} expects 2 inputs; got {idx}"
def test_identity():
x = np.zeros((2, 3))
y = np.ones((2, 3))
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=60)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), (-2.098076211353316, 2.3660254037844393))
res_x, res_y = model(x, y)
assert_allclose((res_x, res_y),
(np.array([[-1.73205081, -1.73205081, -1.73205081],
[-1.73205081, -1.73205081, -1.73205081]]),
np.array([[1., 1., 1.],
[1., 1., 1.]])))
assert_allclose(model.inverse(res_x, res_y), (x, y), atol=1.e-10)
# https://github.com/astropy/astropy/pull/6018
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', fitters)
def test_fittable_compound(fitter):
fitter = fitter()
m = Identity(1) | Mapping((0, )) | Gaussian1D(1, 5, 4)
x = np.arange(10)
y_real = m(x)
dy = 0.005
with NumpyRNGContext(1234567):
n = np.random.normal(0., dy, x.shape)
y_noisy = y_real + n
new_model = fitter(m, x, y_noisy)
y_fit = new_model(x)
assert_allclose(y_fit, y_real, atol=dy)
def test_identity_repr():
m = Identity(1, name='foo')
assert repr(m) == "<Identity(1, name='foo')>"
m = Identity(1)
assert repr(m) == "<Identity(1)>"
def test_mapping_repr():
m = Mapping([0, 1], name='foo')
assert repr(m) == "<Mapping([0, 1], name='foo')>"
m = Mapping([0, 1])
assert repr(m) == "<Mapping([0, 1])>"
class TestUnitsMapping:
def test___init__(self):
# Set values
model = UnitsMapping(((u.m, None),),
input_units_equivalencies='test_eqiv',
input_units_allow_dimensionless=True,
name='test')
assert model._mapping == ((u.m, None),)
assert model._input_units_strict == {'x': True}
assert model.input_units_equivalencies == 'test_eqiv'
assert model.input_units_allow_dimensionless == {'x': True}
assert model.name == 'test'
assert model._input_units == {'x': u.m}
# Default values
model = UnitsMapping(((u.K, None),))
assert model._mapping == ((u.K, None),)
assert model._input_units_strict == {'x': True}
assert model.input_units_equivalencies is None
assert model.input_units_allow_dimensionless == {'x': False}
assert model.name is None
assert model._input_units == {'x': u.K}
# Error
with pytest.raises(ValueError) as err:
UnitsMapping(((u.m, None), (u.m, u.K)))
assert str(err.value) == "If one return unit is None, then all must be None"
def test_evaluate(self):
model = UnitsMapping(((u.m, None),))
assert model(10*u.m) == 10
model = UnitsMapping(((u.m, u.K),))
assert model(10*u.m) == 10 * u.K
model = UnitsMapping(((u.m, None), (u.K, None)),)
assert model(10*u.m, 20*u.K) == (10, 20)
model = UnitsMapping(((u.m, u.K), (u.K, u.m)),)
assert model(10*u.m, 20*u.K) == (10*u.K, 20*u.m)
def test_repr(self):
model = UnitsMapping(((u.m, None),), name='foo')
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),), name='foo')>"
model = UnitsMapping(((u.m, None),))
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),))>"
|
9b6d45b12056ad3e27e2c640d7173cb5dd2c91fc343f49785b26d8d7cb130748 | # Various tests of models not related to evaluation, fitting, or parameters
# pylint: disable=invalid-name, no-member
import pytest
from astropy import units as u
from astropy.modeling import models
from astropy.modeling.core import _ModelMeta
from astropy.modeling.models import Gaussian1D, Mapping, Pix2Sky_TAN
from astropy.tests.helper import assert_quantity_allclose
def test_gaussian1d_bounding_box():
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
bbox = g.bounding_box.bounding_box()
assert_quantity_allclose(bbox[0], 2.835 * u.m)
assert_quantity_allclose(bbox[1], 3.165 * u.m)
def test_gaussian1d_n_models():
g = Gaussian1D(
amplitude=[1 * u.J, 2. * u.J],
mean=[1 * u.m, 5000 * u.AA],
stddev=[0.1 * u.m, 100 * u.AA],
n_models=2)
assert_quantity_allclose(g(1.01 * u.m), [0.99501248, 0.] * u.J)
assert_quantity_allclose(
g(u.Quantity([1.01 * u.m, 5010 * u.AA])), [0.99501248, 1.990025] * u.J)
# FIXME: The following doesn't work as np.asanyarray doesn't work with a
# list of quantity objects.
# assert_quantity_allclose(g([1.01 * u.m, 5010 * u.AA]),
# [ 0.99501248, 1.990025] * u.J)
"""
Test the "rules" of model units.
"""
def test_quantity_call():
"""
    Test that models constructed with Quantities must be called with Quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g(10 * u.m)
with pytest.raises(u.UnitsError):
g(10)
def test_no_quantity_call():
"""
    Test that models not constructed with Quantities can be called without quantities.
"""
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert isinstance(g, Gaussian1D)
g(10)
def test_default_parameters():
# Test that calling with a quantity works when one of the parameters
# defaults to dimensionless
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm)
assert isinstance(g, Gaussian1D)
g(10*u.m)
def test_uses_quantity():
"""
Test Quantity
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
assert g.uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert not g.uses_quantity
g.mean = 3 * u.m
assert g.uses_quantity
def test_uses_quantity_compound():
"""
Test Quantity
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g2 = Gaussian1D(mean=5 * u.m, stddev=5 * u.cm, amplitude=5 * u.Jy)
assert (g | g2).uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
g2 = Gaussian1D(mean=5, stddev=5, amplitude=5)
comp = g | g2
assert not (comp).uses_quantity
def test_uses_quantity_no_param():
comp = Mapping((0, 1)) | Pix2Sky_TAN()
assert comp.uses_quantity
def _allmodels():
allmodels = []
for name in dir(models):
model = getattr(models, name)
if type(model) is _ModelMeta:
            try:
                m = model()
            except Exception:
                # Skip models that cannot be instantiated with defaults.
                continue
            allmodels.append(m)
return allmodels
@pytest.mark.parametrize("m", _allmodels())
def test_read_only(m):
"""
input_units
return_units
input_units_allow_dimensionless
input_units_strict
"""
with pytest.raises(AttributeError):
m.input_units = {}
with pytest.raises(AttributeError):
m.return_units = {}
with pytest.raises(AttributeError):
m.input_units_allow_dimensionless = {}
with pytest.raises(AttributeError):
m.input_units_strict = {}
|
2c17fcbcc04fde01581435e4e8b1d1ceb1802f3ea9b2a0f568aa4d337e61014a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import fix_inputs
from astropy.modeling.fitting import DogBoxLSQFitter, LevMarLSQFitter, LMLSQFitter, TRFLSQFitter
from astropy.modeling.functional_models import (
AiryDisk2D, ArcCosine1D, ArcSine1D, ArcTangent1D, Box1D, Box2D, Const1D, Const2D, Cosine1D,
Disk2D, Ellipse2D, Exponential1D, Gaussian1D, Gaussian2D, KingProjectedAnalytic1D, Linear1D,
Logarithmic1D, Lorentz1D, Moffat1D, Moffat2D, Multiply, Planar2D, RickerWavelet1D,
RickerWavelet2D, Ring2D, Scale, Sersic1D, Sersic2D, Sine1D, Tangent1D, Trapezoid1D,
TrapezoidDisk2D, Voigt1D)
from astropy.modeling.parameters import InputParameterError
from astropy.modeling.physical_models import Drude1D, Plummer1D
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D, PowerLaw1D,
SmoothlyBrokenPowerLaw1D)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
FUNC_MODELS_1D = [
{
'class': Gaussian1D,
'parameters': {'amplitude': 3 * u.Jy, 'mean': 2 * u.m, 'stddev': 30 * u.cm},
'evaluation': [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
'bounding_box': [0.35, 3.65] * u.m
},
{
'class': Sersic1D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'r_eff': 2 * u.arcsec, 'n': 4},
'evaluation': [(3 * u.arcsec, 1.3237148119468918 * u.MJy/u.sr)],
'bounding_box': False
},
{
'class': Sine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False
},
{
'class': Cosine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.25},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False
},
{
'class': Tangent1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': [-4, 0] / u.Hz
},
{
'class': ArcSine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(0 * u.km / u.s, -2 * u.s)],
'bounding_box': [-3, 3] * u.km / u.s
},
{
'class': ArcCosine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(0 * u.km / u.s, -1 * u.s)],
'bounding_box': [-3, 3] * u.km / u.s
},
{
'class': ArcTangent1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},
'evaluation': [(0 * u.km / u.s, -2 * u.s)],
'bounding_box': False
},
{
'class': Linear1D,
'parameters': {'slope': 3 * u.km / u.s, 'intercept': 5000 * u.m},
'evaluation': [(6000 * u.ms, 23 * u.km)],
'bounding_box': False
},
{
'class': Lorentz1D,
'parameters': {'amplitude': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm': 100 * u.AA},
'evaluation': [(0.51 * u.micron, 1 * u.Jy)],
'bounding_box': [255, 755] * u.nm
},
{
'class': Voigt1D,
'parameters': {'amplitude_L': 2 * u.Jy, 'x_0': 505 * u.nm,
'fwhm_L': 100 * u.AA, 'fwhm_G': 50 * u.AA},
'evaluation': [(0.51 * u.micron, 1.0621795524 * u.Jy)],
'bounding_box': False
},
{
'class': Const1D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 3 * u.Jy)],
'bounding_box': False
},
{
'class': Box1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.9, 4.9] * u.um
},
{
'class': Trapezoid1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um,
'width': 1 * u.um, 'slope': 5 * u.Jy / u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.3, 5.5] * u.um
},
{
'class': RickerWavelet1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'sigma': 1e-3 * u.mm},
'evaluation': [(1000 * u.nm, -0.09785050 * u.Jy)],
'bounding_box': [-5.6, 14.4] * u.um
},
{
'class': Moffat1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 0.238853503 * u.Jy)],
'bounding_box': False
},
{
'class': KingProjectedAnalytic1D,
'parameters': {'amplitude': 1. * u.Msun/u.pc**2, 'r_core': 1. * u.pc, 'r_tide': 2. * u.pc},
'evaluation': [(0.5 * u.pc, 0.2 * u.Msun/u.pc**2)],
'bounding_box': [0. * u.pc, 2. * u.pc]
},
{
'class': Logarithmic1D,
'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},
'evaluation': [(4 * u.m, 3.4657359027997265 * u.m)],
'bounding_box': False
},
{
'class': Exponential1D,
'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},
'evaluation': [(4 * u.m, 36.945280494653254 * u.m)],
'bounding_box': False
}
]
SCALE_MODELS = [
{
'class': Scale,
'parameters': {'factor': 2*u.m},
'evaluation': [(1*u.m, 2*u.m)],
'bounding_box': False
},
{
'class': Multiply,
'parameters': {'factor': 2*u.m},
'evaluation': [(1 * u.m/u.m, 2*u.m)],
'bounding_box': False
},
]
PHYS_MODELS_1D = [
{
'class': Plummer1D,
'parameters': {'mass': 3 * u.kg, 'r_plum': 0.5 * u.m},
'evaluation': [(1 * u.m, 0.10249381 * u.kg / (u.m ** 3))],
'bounding_box': False
},
{
'class': Drude1D,
'parameters': {'amplitude': 1.0 * u.m, 'x_0': 2175. * u.AA, 'fwhm': 400. * u.AA},
'evaluation': [(2000 * u.AA, 0.5452317018423869 * u.m)],
'bounding_box': [-17825, 22175] * u.AA
},
]
FUNC_MODELS_2D = [
{
'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-13.02230366, 15.02230366],
[-12.02230366, 16.02230366]] * u.m
},
{
'class': Const2D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
'bounding_box': False
},
{
'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m
},
{
'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m
},
{
'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.5495097567963922, 4.549509756796392],
[0.4504902432036073, 5.549509756796393]] * u.m
},
{
'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m
},
{
'class': Box2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.s,
'x_width': 4 * u.cm, 'y_width': 3 * u.s},
'evaluation': [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
'bounding_box': [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]]
},
{
'class': RickerWavelet2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False
},
{
'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False
},
{
'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False
},
{
'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False
},
{
'class': Planar2D,
'parameters': {'slope_x': 2*u.m, 'slope_y': 3*u.m, 'intercept': 4*u.m},
'evaluation': [(5*u.m/u.m, 6*u.m/u.m, 32*u.m)],
'bounding_box': False
},
]
POWERLAW_MODELS = [
{
'class': PowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1},
'evaluation': [(1 * u.m, 500 * u.g)],
'bounding_box': False
},
{
'class': BrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1},
'evaluation': [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
'bounding_box': False
},
{
'class': SmoothlyBrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm,
'alpha_1': 1, 'alpha_2': -1, 'delta': 1},
'evaluation': [(1 * u.cm, 15.125 * u.kg), (1 * u.m, 15.125 * u.kg)],
'bounding_box': False
},
{
'class': ExponentialCutoffPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'x_cutoff': 1 * u.m},
'evaluation': [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
'bounding_box': False
},
{
'class': LogParabola1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'beta': 2},
'evaluation': [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
'bounding_box': False
},
]
POLY_MODELS = [
{
'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.one, 'c1': 2 / u.m, 'c2': 3 / u.m**2},
'evaluation': [(3 * u.m, 36 * u.one)],
'bounding_box': False
},
{
'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg / u.m, 'c2': 3 * u.kg / u.m**2},
'evaluation': [(3 * u.m, 36 * u.kg)],
'bounding_box': False
},
{
'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg, 'c2': 3 * u.kg},
'evaluation': [(3 * u.one, 36 * u.kg)],
'bounding_box': False
},
{
'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.one, 'c1_0': 2 / u.m, 'c2_0': 3 / u.m**2,
'c0_1': 3 / u.s, 'c0_2': -2 / u.s**2, 'c1_1': 5 / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.one)],
'bounding_box': False
},
{
'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg,
'c1_0': 2 * u.kg / u.m, 'c2_0': 3 * u.kg / u.m**2,
'c0_1': 3 * u.kg / u.s, 'c0_2': -2 * u.kg / u.s**2,
'c1_1': 5 * u.kg / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.kg)],
'bounding_box': False
},
{
'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg, 'c2_0': 3 * u.kg,
'c0_1': 3 * u.kg, 'c0_2': -2 * u.kg, 'c1_1': 5 * u.kg},
'evaluation': [(3 * u.one, 2 * u.one, 64 * u.kg)],
'bounding_box': False
},
]
MODELS = (FUNC_MODELS_1D + SCALE_MODELS + FUNC_MODELS_2D + POWERLAW_MODELS +
PHYS_MODELS_1D + POLY_MODELS)
SCIPY_MODELS = {Sersic1D, Sersic2D, AiryDisk2D}
# These models will fail the fitting test because the built-in fitting data
# produce non-finite values
NON_FINITE_LevMar_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
LogParabola1D
]
# These models will fail the TRFLSQFitter fitting test due to non-finite values
NON_FINITE_TRF_MODELS = [
ArcSine1D,
ArcCosine1D,
Sersic1D,
Sersic2D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D
]
# These models will fail the LMLSQFitter fitting test due to non-finite values
NON_FINITE_LM_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
LogParabola1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D
]
# These models will fail the DogBoxLSQFitter fitting test due to non-finite values
NON_FINITE_DogBox_MODELS = [
Sersic1D,
Sersic2D,
ArcSine1D,
ArcCosine1D,
SmoothlyBrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D
]
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = dict(zip(('x', 'y'), args))
else:
kwargs = dict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model['parameters'].items():
if value is None or key == 'degree':
params[key] = value
else:
params[key] = np.repeat(value, 2)
params['n_models'] = 2
m = model['class'](**params)
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
if model['class'] == Drude1D:
params['x_0'][-1] = 0 * u.AA
with pytest.raises(InputParameterError) as err:
model['class'](**params)
assert str(err.value) == '0 is not an allowed value for x_0'
@pytest.mark.parametrize('model', MODELS)
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
    # In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
if model['bounding_box'] is False:
# Check that NotImplementedError is raised, so that if bounding_box is
# implemented we remember to set bounding_box=True in the list of models
# above
with pytest.raises(NotImplementedError):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
for i in range(len(model['bounding_box'])):
bbox = m.bounding_box
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
assert_quantity_allclose(bbox[i], model['bounding_box'][i])
@pytest.mark.parametrize('model', MODELS)
def test_compound_model_input_units_equivalencies_defaults(model):
m = model['class'](**model['parameters'])
assert m.input_units_equivalencies is None
compound_model = m + m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x': 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m - m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x': 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m & m
assert compound_model.inputs_map()['x1'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x0': 1})
assert fixed_input_model.inputs_map()['x1'][0].input_units_equivalencies is None
assert fixed_input_model.input_units_equivalencies is None
if m.n_outputs == m.n_inputs:
compound_model = m | m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x': 1})
assert fixed_input_model.input_units_equivalencies is None
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize('model', MODELS)
@pytest.mark.parametrize('fitter', fitters)
def test_models_fitting(model, fitter):
fitter = fitter()
if (
(isinstance(fitter, LevMarLSQFitter) and model['class'] in NON_FINITE_LevMar_MODELS) or
(isinstance(fitter, TRFLSQFitter) and model['class'] in NON_FINITE_TRF_MODELS) or
(isinstance(fitter, LMLSQFitter) and model['class'] in NON_FINITE_LM_MODELS) or
(isinstance(fitter, DogBoxLSQFitter) and model['class'] in NON_FINITE_DogBox_MODELS)
):
return
m = model['class'](**model['parameters'])
if len(model['evaluation'][0]) == 2:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.exp(-x.value ** 2) * model['evaluation'][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.linspace(1, 3, 100) * model['evaluation'][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model['evaluation'][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
            # If the parameter had no unit before the fit, allow it to either
            # stay unitless or come back with, e.g., a radian unit
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
unit_mismatch_models = [
{'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.K, 3 * u.Jy * np.exp(-0.5)),
(412.1320343 * u.K, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m},
{'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.K, 3 * u.Jy),
(4 * u.K, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
{'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.K, 3 * u.Jy),
(5.8 * u.K, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m},
{'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.K + 10 * u.K, 3 * u.Jy),
(302.05 * u.K, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
{'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.K, 1.5 * u.Jy),
(3.5 * u.K, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m},
{'class': RickerWavelet2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.K, 0.602169107 * u.Jy),
(4 * u.K, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False},
{'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.K, 4.76998480e-05 * u.Jy),
(4 * u.K, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False},
{'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.K, 0.202565833 * u.Jy),
(1000 * u.K, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False},
{'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.m, 2.829990489 * u.MJy/u.sr),
(3 * u.m, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False},
]
@pytest.mark.parametrize('model', unit_mismatch_models)
def test_input_unit_mismatch_error(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
message = "Units of 'x' and 'y' inputs should match"
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = dict(zip(('x', 'y'), args))
else:
kwargs = dict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
with pytest.raises(u.UnitsError) as err:
m.without_units_for_data(**kwargs)
assert str(err.value) == message
|
63b0ef0c7879d854198b2cc21eadb9c613db9cb5883252de5851f9aa7979a822 | import sys
import pytest
from astropy.samp import conf
from astropy.samp.hub_script import hub_script
def setup_module(module):
conf.use_internet = False
def setup_function(function):
function.sys_argv_orig = sys.argv
sys.argv = ["samp_hub"]
def teardown_function(function):
sys.argv = function.sys_argv_orig
@pytest.mark.slow
def test_hub_script():
sys.argv.append('-m') # run in multiple mode
sys.argv.append('-w') # disable web profile
hub_script(timeout=3)
|
8ab1807745fc7ea6b7144b29350f0fc7cbe98414faacdf584828e153ac9c1464 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import time
import pytest
from astropy.samp import conf
from astropy.samp.hub import SAMPHubServer
def setup_module(module):
conf.use_internet = False
def test_SAMPHubServer():
"""Test that SAMPHub can be instantiated"""
SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
def test_SAMPHubServer_run():
"""Test that SAMPHub can be run"""
hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1)
hub.start()
time.sleep(1)
hub.stop()
@pytest.mark.slow
def test_SAMPHubServer_run_repeated():
"""
Test that SAMPHub can be restarted after it has been stopped, including
when web profile support is enabled.
"""
hub = SAMPHubServer(web_profile=True, mode='multiple', pool_size=1)
hub.start()
time.sleep(1)
hub.stop()
time.sleep(1)
hub.start()
time.sleep(1)
hub.stop()
|
a11b4aa47e77fbabc5d4339b36fd75dec2547d336b6266101549c6870a417939 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
import re
from datetime import datetime
from warnings import warn
from urllib.parse import urlparse
import numpy as np
import erfa
from astropy.time import Time, TimeDelta
from astropy import config as _config
from astropy import units as u
from astropy.table import QTable, MaskedColumn
from astropy.utils.data import (get_pkg_data_filename, clear_download_cache,
is_url_in_cache, get_readable_fileobj)
from astropy.utils.state import ScienceState
from astropy import utils
from astropy.utils.exceptions import AstropyWarning
__all__ = ['Conf', 'conf', 'earth_orientation_table',
'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto',
'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION',
'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE',
'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_URL_MIRROR', 'IERS_A_README',
'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README',
'IERSRangeError', 'IERSStaleWarning', 'IERSWarning',
'IERSDegradedAccuracyWarning',
'LeapSeconds', 'IERS_LEAP_SECOND_FILE', 'IERS_LEAP_SECOND_URL',
'IETF_LEAP_SECOND_URL']
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = 'finals2000A.all'
IERS_A_URL = 'https://datacenter.iers.org/data/9/finals2000A.all'
IERS_A_URL_MIRROR = 'https://maia.usno.navy.mil/ser7/finals2000A.all'
IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A')
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now')
IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now'
IERS_B_README = get_pkg_data_filename('data/ReadMe.eopc04_IAU2000')
# LEAP SECONDS default file name, URL, and alternative format/URL
IERS_LEAP_SECOND_FILE = get_pkg_data_filename('data/Leap_Second.dat')
IERS_LEAP_SECOND_URL = 'https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat'
IETF_LEAP_SECOND_URL = 'https://www.ietf.org/timezones/data/leap-seconds.list'
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
MONTH_ABBR = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
class IERSWarning(AstropyWarning):
"""
Generic warning class for IERS.
"""
class IERSDegradedAccuracyWarning(AstropyWarning):
"""
IERS time conversion has degraded accuracy normally due to setting
``conf.auto_download = False`` and ``conf.iers_degraded_accuracy = 'warn'``.
"""
class IERSStaleWarning(IERSWarning):
"""
Downloaded IERS table may be stale.
"""
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
kwargs.setdefault('http_headers', {'User-Agent': 'astropy/iers',
'Accept': '*/*'})
with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
def _none_to_float(value):
"""
    Convert None to a valid floating point value, in particular
    for auto_max_age = None.
"""
return (value if value is not None else np.finfo(float).max)
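# For example, _none_to_float(None) returns np.finfo(float).max (~1.8e308), so
# staleness comparisons against auto_max_age can never trigger when it is None.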
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
'Enable auto-downloading of the latest IERS data. If set to False '
'then the local IERS-B file will be used by default (even if the '
'full IERS file with predictions was already downloaded and cached). '
'This parameter also controls whether internet resources will be '
'queried to update the leap second table if the installed version is '
'out of date. Default is True.')
auto_max_age = _config.ConfigItem(
30.0,
'Maximum age (days) of predictive data before auto-downloading. '
'See "Auto refresh behavior" in astropy.utils.iers documentation for details. '
'Default is 30.')
iers_auto_url = _config.ConfigItem(
IERS_A_URL,
'URL for auto-downloading IERS file data.')
iers_auto_url_mirror = _config.ConfigItem(
IERS_A_URL_MIRROR,
'Mirror URL for auto-downloading IERS file data.')
remote_timeout = _config.ConfigItem(
10.0,
'Remote timeout downloading IERS file data (seconds).')
iers_degraded_accuracy = _config.ConfigItem(
['error', 'warn', 'ignore'],
'IERS behavior if the range of available IERS data does not '
'cover the times when converting time scales, potentially leading '
'to degraded accuracy.')
system_leap_second_file = _config.ConfigItem(
'',
'System file with leap seconds.')
iers_leap_second_auto_url = _config.ConfigItem(
IERS_LEAP_SECOND_URL,
'URL for auto-downloading leap seconds.')
ietf_leap_second_auto_url = _config.ConfigItem(
IETF_LEAP_SECOND_URL,
'Alternate URL for auto-downloading leap seconds.')
conf = Conf()
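# Illustrative sketch of adjusting this configuration (values are arbitrary;
# ``set_temp`` is the standard ``ConfigNamespace`` context manager):
#
#     from astropy.utils.iers import conf
#     conf.auto_max_age = None                    # disable stale-data errors
#     with conf.set_temp('auto_download', False):
#         ...  # use only local/cached IERS data inside this block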
class IERSRangeError(IndexError):
"""
    Raised when dates are outside the valid range covered by the IERS table.
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
"""Cached table, returned if ``open`` is called without arguments."""
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
IERS
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
# TODO: the below is really ugly and probably a bad idea. Instead,
# there should probably be an IERSBase class, which provides
# useful methods but cannot really be used on its own, and then
# *perhaps* an IERS class which provides best defaults. But for
# backwards compatibility, we use the IERS_B reader for IERS here.
if cls is IERS:
cls.iers_table = IERS_B.read(**kwargs)
else:
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or `~astropy.time.Time`
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO+mjd) + jd2
return mjd, utc
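    # Worked example for ``mjd_utc`` above (a minimal sketch, where
    # ``iers_tab`` is any opened IERS table instance):
    #
    #     >>> iers_tab.mjd_utc(2450000.5, 0.25)   # doctest: +SKIP
    #     (50000.0, 0.25)
    #
    # i.e. floor(2450000.5 - 2400000.5 + 0.25) = 50000 whole days, with the
    # remaining 0.25 day returned as the UTC fraction.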
def ut1_utc(self, jd1, jd2=0., return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['UT1_UTC'],
self.ut1_utc_source if return_status else None)
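    # Usage sketch (hypothetical session; the IERS-B table ships with astropy,
    # so no network access is needed):
    #
    #     >>> from astropy.time import Time
    #     >>> iers_b = IERS_B.open()                             # doctest: +SKIP
    #     >>> dut1, status = iers_b.ut1_utc(Time('2010-01-01'),
    #     ...                               return_status=True)  # doctest: +SKIP
    #     >>> status == FROM_IERS_B                              # doctest: +SKIP
    #     True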
def dcip_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : `~astropy.units.Quantity` ['angle']
x component of CIP correction for the requested times.
D_y : `~astropy.units.Quantity` ['angle']
y component of CIP correction for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['dX_2000A', 'dY_2000A'],
self.dcip_source if return_status else None)
def pm_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : `~astropy.units.Quantity` ['angle']
x component of polar motion for the requested times.
PM_y : `~astropy.units.Quantity` ['angle']
y component of polar motion for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['PM_x', 'PM_y'],
self.pm_source if return_status else None)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
if conf.iers_degraded_accuracy == 'error':
msg = ('(some) times are outside of range covered by IERS table. Cannot convert '
'with full accuracy. To allow conversion with degraded accuracy '
'set astropy.utils.iers.conf.iers_degraded_accuracy '
'to "warn" or "silent". For more information about setting this '
'configuration parameter or controlling its value globally, see the '
'Astropy configuration system documentation '
'https://docs.astropy.org/en/stable/config/index.html.')
raise IERSRangeError(msg)
elif conf.iers_degraded_accuracy == 'warn':
# No IERS data covering the time(s) and user requested a warning.
msg = ('(some) times are outside of range covered by IERS table, '
'accuracy is degraded.')
warn(msg, IERSDegradedAccuracyWarning)
# No IERS data covering the time(s) and user is OK with no warning.
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, '__array__') or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
elif mjd.size == 0:
# Short-cut empty input.
return np.array([])
self._refresh_table_as_needed(mjd)
        # For the typical file format, searchsorted will always find a match
        # (since the MJD values are integers), so it matters on which side of
        # a match we land; side='right' ensures
        # self['MJD'][i-1] <= mjd < self['MJD'][i]
i = np.searchsorted(self['MJD'].value, mjd, side='right')
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self['MJD'][i0].value, self['MJD'][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == 'UT1_UTC':
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# https://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
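    # Standalone numeric sketch of the linear interpolation above
    # (illustrative values only): for table points (MJD 50000, 0.1 s) and
    # (MJD 50001, 0.3 s), a request at mjd=50000, utc=0.25 gives
    #
    #     val = 0.1 + (50000 - 50000 + 0.25) / (50001 - 50000) * (0.3 - 0.1)
    #         = 0.15 s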
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
        time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
try:
return self._time_now
except Exception:
return Time.now()
def _convert_col_for_table(self, col):
# Fill masked columns with units to avoid dropped-mask warnings
# when converting to Quantity.
# TODO: Once we support masked quantities, we can drop this and
# in the code below replace b_bad with table['UT1_UTC_B'].mask, etc.
if (getattr(col, 'unit', None) is not None and
isinstance(col, MaskedColumn)):
col = col.filled(np.nan)
return super()._convert_col_for_table(col)
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See https://datacenter.iers.org/eop.php
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL`` or ``iers.IERS_A_URL_MIRROR``. See ``iers.__doc__``
for instructions on use in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[np.isfinite(iers_a['UT1_UTC_A']) &
(iers_a['PolPMFlag_A'] != '')]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Combine A and B columns, using B where possible.
b_bad = np.isnan(table['UT1_UTC_B'])
table['UT1_UTC'] = np.where(b_bad, table['UT1_UTC_A'], table['UT1_UTC_B'])
table['UT1Flag'] = np.where(b_bad, table['UT1Flag_A'], 'B')
# Repeat for polar motions.
b_bad = np.isnan(table['PM_X_B']) | np.isnan(table['PM_Y_B'])
table['PM_x'] = np.where(b_bad, table['PM_x_A'], table['PM_X_B'])
table['PM_y'] = np.where(b_bad, table['PM_y_A'], table['PM_Y_B'])
table['PolPMFlag'] = np.where(b_bad, table['PolPMFlag_A'], 'B')
b_bad = np.isnan(table['dX_2000A_B']) | np.isnan(table['dY_2000A_B'])
table['dX_2000A'] = np.where(b_bad, table['dX_2000A_A'], table['dX_2000A_B'])
table['dY_2000A'] = np.where(b_bad, table['dY_2000A_A'], table['dY_2000A_B'])
table['NutFlag'] = np.where(b_bad, table['NutFlag_A'], 'B')
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
# Since only 'P' and 'I' are possible and 'P' is guaranteed to come
# after 'I', we can use searchsorted for 100 times speed up over
# finding the first index where the flag equals 'P'.
p_index = min(np.searchsorted(table['UT1Flag_A'], 'P'),
np.searchsorted(table['PolPMFlag_A'], 'P'))
table.meta['predictive_index'] = p_index
table.meta['predictive_mjd'] = table['MJD'][p_index].value
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
iers_a = super().read(file, format='cds', readme=readme)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
ut1flag = self['UT1Flag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == 'I'] = FROM_IERS_A
source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
nutflag = self['NutFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == 'I'] = FROM_IERS_A
source[nutflag == 'P'] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table"""
pmflag = self['PolPMFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == 'I'] = FROM_IERS_A
source[pmflag == 'P'] = FROM_IERS_A_PREDICTION
return source
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see https://www.iers.org/IERS/EN/Home/home_node.html
Notes
-----
    If the package IERS B file (``iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=14):
"""Read IERS-B table from a eopc04_iau2000.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
starting row. Default is 14, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
table = super().read(file, format='cds', readme=readme,
data_start=data_start)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
is older than ``astropy.utils.iers.conf.auto_max_age`` days old
(or non-existent) then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance
With IERS (Earth rotation) data columns
"""
if not conf.auto_download:
cls.iers_table = IERS_B.open()
return cls.iers_table
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get('data_url') in all_urls:
return cls.iers_table
for url in all_urls:
try:
filename = download_file(url, cache=True)
except Exception as err:
warn(f'failed to download {url}: {err}', IERSWarning)
continue
try:
cls.iers_table = cls.read(file=filename)
except Exception as err:
warn(f'malformed IERS table from {url}: {err}', IERSWarning)
continue
cls.iers_table.meta['data_url'] = url
break
else:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream if actually trying to interpolate
# predictive values.
warn('unable to download valid IERS file, using local IERS-B',
IERSWarning)
cls.iers_table = IERS_B.open()
return cls.iers_table
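    # Usage sketch: the first call downloads and memoizes the table when
    # ``conf.auto_download`` is True; later calls return the cached table.
    #
    #     >>> iers_a = IERS_Auto.open()   # doctest: +SKIP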
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta['predictive_mjd']
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = _none_to_float(conf.auto_max_age)
if (max_input_mjd > predictive_mjd and
self.time_now.mjd - predictive_mjd > auto_max_age):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
server if both the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
        - The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta['predictive_index']
predictive_mjd = self.meta['predictive_mjd']
# Update table in place if necessary
auto_max_age = _none_to_float(conf.auto_max_age)
        # If auto_max_age is smaller than the IERS update interval then repeated
        # downloads may occur without getting updated values (giving an
        # IERSStaleWarning).
if auto_max_age < 10:
raise ValueError('IERS auto_max_age configuration value must be larger than 10 days')
if (max_input_mjd > predictive_mjd and
(now_mjd - predictive_mjd) > auto_max_age):
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
# Get the latest version
try:
filename = download_file(
all_urls[0], sources=all_urls, cache="update")
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning(
f'failed to download {" and ".join(all_urls)}: {err}.\n'
'A coordinate or time-related '
'calculation might be compromised or fail because the dates are '
'not covered by the available IERS file. See the '
'"IERS data access" section of the astropy documentation '
'for additional information on working offline.'))
return
new_table = self.__class__.read(file=filename)
new_table.meta['data_url'] = str(all_urls[0])
# New table has new values?
if new_table['MJD'][-1] > self['MJD'][-1]:
                # Replace current values in place, from the first predictive
                # index through the end of the current table. This is much
                # faster than deleting all rows and then using add_row for the
                # whole duration.
new_fpi = np.searchsorted(new_table['MJD'].value, predictive_mjd, side='right')
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi:new_fpi + n_replace]
# Sanity check for continuity
if new_table['MJD'][new_fpi + n_replace] - self['MJD'][-1] != 1.0 * u.d:
raise ValueError('unexpected gap in MJD when refreshing IERS table')
# Now add new rows in place
for row in new_table[new_fpi + n_replace:]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(IERSStaleWarning(
'IERS_Auto predictive values are older than {} days but downloading '
'the latest table did not find newer values'.format(conf.auto_max_age)))
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table['MJD'][np.isfinite(table['UT1_UTC_B'])]
i0 = np.searchsorted(iers_b['MJD'], mjd_b[0], side='left')
i1 = np.searchsorted(iers_b['MJD'], mjd_b[-1], side='right')
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not u.allclose(table['MJD'][:n_iers_b], iers_b['MJD']):
raise ValueError('unexpected mismatch when copying '
'IERS-B values into IERS-A table.')
# Finally do the overwrite
table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC']
table['PM_X_B'][:n_iers_b] = iers_b['PM_x']
table['PM_Y_B'][:n_iers_b] = iers_b['PM_y']
table['dX_2000A_B'][:n_iers_b] = iers_b['dX_2000A']
table['dY_2000A_B'][:n_iers_b] = iers_b['dY_2000A']
return table
class earth_orientation_table(ScienceState):
"""Default IERS table for Earth rotation and reference systems service.
These tables are used to calculate the offsets between ``UT1`` and ``UTC``
and for conversion to Earth-based coordinate systems.
The state itself is an IERS table, as an instance of one of the
`~astropy.utils.iers.IERS` classes. The default, the auto-updating
`~astropy.utils.iers.IERS_Auto` class, should suffice for most
purposes.
Examples
--------
To temporarily use the IERS-B file packaged with astropy::
>>> from astropy.utils import iers
>>> from astropy.time import Time
>>> iers_b = iers.IERS_B.open(iers.IERS_B_FILE)
>>> with iers.earth_orientation_table.set(iers_b):
... print(Time('2000-01-01').ut1.isot)
2000-01-01T00:00:00.355
To use the most recent IERS-A file for the whole session::
>>> iers_a = iers.IERS_A.open(iers.IERS_A_URL) # doctest: +SKIP
>>> iers.earth_orientation_table.set(iers_a) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_A length=17463>...>
To go back to the default (of `~astropy.utils.iers.IERS_Auto`)::
>>> iers.earth_orientation_table.set(None) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_Auto length=17428>...>
"""
_value = None
@classmethod
def validate(cls, value):
if value is None:
value = IERS_Auto.open()
if not isinstance(value, IERS):
raise ValueError("earth_orientation_table requires an IERS Table.")
return value
class LeapSeconds(QTable):
"""Leap seconds class, holding TAI-UTC differences.
The table should hold columns 'year', 'month', 'tai_utc'.
Methods are provided to initialize the table from IERS ``Leap_Second.dat``,
IETF/ntp ``leap-seconds.list``, or built-in ERFA/SOFA, and to update the
list used by ERFA.
Notes
-----
    Astropy has a built-in ``iers.IERS_LEAP_SECOND_FILE``. Up to date versions
    can be downloaded from ``iers.IERS_LEAP_SECOND_URL`` or
    ``iers.IETF_LEAP_SECOND_URL``. Many systems also store a version
of ``leap-seconds.list`` for use with ``ntp`` (e.g., on Debian/Ubuntu
systems, ``/usr/share/zoneinfo/leap-seconds.list``).
To prevent querying internet resources if the available local leap second
file(s) are out of date, set ``iers.conf.auto_download = False``. This
must be done prior to performing any ``Time`` scale transformations related
to UTC (e.g. converting from UTC to TAI).
"""
# Note: Time instances in this class should use scale='tai' to avoid
# needing leap seconds in their creation or interpretation.
_re_expires = re.compile(r'^#.*File expires on[:\s]+(\d+\s\w+\s\d+)\s*$')
_expires = None
_auto_open_files = ['erfa',
IERS_LEAP_SECOND_FILE,
'system_leap_second_file',
'iers_leap_second_auto_url',
'ietf_leap_second_auto_url']
"""Files or conf attributes to try in auto_open."""
@classmethod
def open(cls, file=None, cache=False):
"""Open a leap-second list.
Parameters
----------
file : path-like or None
Full local or network path to the file holding leap-second data,
for passing on to the various ``from_`` class methods.
If 'erfa', return the data used by the ERFA library.
If `None`, use default locations from file and configuration to
find a table that is not expired.
cache : bool
Whether to use cache. Defaults to False, since leap-second files
are regularly updated.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Table with 'year', 'month', and 'tai_utc' columns, plus possibly
others.
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration days are thus
generally at least 150 days after the present. For the auto-loading,
        a list consisting of the table shipped with astropy and the files and
        URLs in `~astropy.utils.iers.Conf` is tried, returning the first
that is sufficiently new, or the newest among them all.
"""
if file is None:
return cls.auto_open()
if file.lower() == 'erfa':
return cls.from_erfa()
if urlparse(file).netloc:
file = download_file(file, cache=cache)
# Just try both reading methods.
try:
return cls.from_iers_leap_seconds(file)
except Exception:
return cls.from_leap_seconds_list(file)
@staticmethod
def _today():
# Get current day in scale='tai' without going through a scale change
# (so we do not need leap seconds).
s = '{0.year:04d}-{0.month:02d}-{0.day:02d}'.format(datetime.utcnow())
return Time(s, scale='tai', format='iso', out_subfmt='date')
@classmethod
def auto_open(cls, files=None):
"""Attempt to get an up-to-date leap-second list.
The routine will try the files in sequence until it finds one
whose expiration date is "good enough" (see below). If none
are good enough, it returns the one with the most recent expiration
date, warning if that file is expired.
For remote files that are cached already, the cached file is tried
first before attempting to retrieve it again.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses
``cls._auto_open_files``.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Up to date leap-second table
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration days are thus
generally at least 150 days after the present. We look for a file
        that expires more than 180 - `~astropy.utils.iers.Conf.auto_max_age`
        days after the present.
"""
offset = 180 - (30 if conf.auto_max_age is None else conf.auto_max_age)
good_enough = cls._today() + TimeDelta(offset, format='jd')
if files is None:
# Basic files to go over (entries in _auto_open_files can be
# configuration items, which we want to be sure are up to date).
files = [getattr(conf, f, f) for f in cls._auto_open_files]
# Remove empty entries.
files = [f for f in files if f]
# Our trials start with normal files and remote ones that are
# already in cache. The bools here indicate that the cache
# should be used.
trials = [(f, True) for f in files
if not urlparse(f).netloc or is_url_in_cache(f)]
# If we are allowed to download, we try downloading new versions
# if none of the above worked.
if conf.auto_download:
trials += [(f, False) for f in files if urlparse(f).netloc]
self = None
err_list = []
# Go through all entries, and return the first one that
# is not expired, or the most up to date one.
for f, allow_cache in trials:
if not allow_cache:
clear_download_cache(f)
try:
trial = cls.open(f, cache=True)
except Exception as exc:
err_list.append(exc)
continue
if self is None or trial.expires > self.expires:
self = trial
self.meta['data_url'] = str(f)
if self.expires > good_enough:
break
if self is None:
raise ValueError('none of the files could be read. The '
'following errors were raised:\n' + str(err_list))
if self.expires < self._today() and conf.auto_max_age is not None:
warn('leap-second file is expired.', IERSStaleWarning)
return self
@property
def expires(self):
"""The limit of validity of the table."""
return self._expires
@classmethod
def _read_leap_seconds(cls, file, **kwargs):
"""Read a file, identifying expiration by matching 'File expires'"""
expires = None
# Find expiration date.
with get_readable_fileobj(file) as fh:
lines = fh.readlines()
for line in lines:
match = cls._re_expires.match(line)
if match:
day, month, year = match.groups()[0].split()
month_nb = MONTH_ABBR.index(month[:3]) + 1
expires = Time(f'{year}-{month_nb:02d}-{day}',
scale='tai', out_subfmt='date')
break
else:
raise ValueError(f'did not find expiration date in {file}')
self = cls.read(lines, format='ascii.no_header', **kwargs)
self._expires = expires
return self
@classmethod
def from_iers_leap_seconds(cls, file=IERS_LEAP_SECOND_FILE):
"""Create a table from a file like the IERS ``Leap_Second.dat``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IERS. By default, uses
``iers.IERS_LEAP_SECOND_FILE``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on 28 June 2020'
"""
return cls._read_leap_seconds(
file, names=['mjd', 'day', 'month', 'year', 'tai_utc'])
@classmethod
def from_leap_seconds_list(cls, file):
"""Create a table from a file like the IETF ``leap-seconds.list``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IETF. Up to date versions
can be retrieved from ``iers.IETF_LEAP_SECOND_URL``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on: 28 June 2020'
"""
from astropy.io.ascii import convert_numpy # Here to avoid circular import
names = ['ntp_seconds', 'tai_utc', 'comment', 'day', 'month', 'year']
# Note: ntp_seconds does not fit in 32 bit, so causes problems on
# 32-bit systems without the np.int64 converter.
self = cls._read_leap_seconds(
file, names=names, include_names=names[:2],
converters={'ntp_seconds': [convert_numpy(np.int64)]})
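        # NTP seconds count from 1900-01-01, which corresponds to MJD 15020;
        # dividing by 86400 s/day and adding that offset yields the MJD.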
self['mjd'] = (self['ntp_seconds']/86400 + 15020).round()
# Note: cannot use Time.ymdhms, since that might require leap seconds.
isot = Time(self['mjd'], format='mjd', scale='tai').isot
ymd = np.array([[int(part) for part in t.partition('T')[0].split('-')]
for t in isot])
self['year'], self['month'], self['day'] = ymd.T
return self
@classmethod
def from_erfa(cls, built_in=False):
"""Create table from the leap-second list in ERFA.
Parameters
----------
built_in : bool
If `False` (default), retrieve the list currently used by ERFA,
which may have been updated. If `True`, retrieve the list shipped
with erfa.
"""
current = cls(erfa.leap_seconds.get())
current._expires = Time('{0.year:04d}-{0.month:02d}-{0.day:02d}'
.format(erfa.leap_seconds.expires),
scale='tai')
if not built_in:
return current
try:
erfa.leap_seconds.set(None) # reset to defaults
return cls.from_erfa(built_in=False)
finally:
erfa.leap_seconds.set(current)
def update_erfa_leap_seconds(self, initialize_erfa=False):
"""Add any leap seconds not already present to the ERFA table.
This method matches leap seconds with those present in the ERFA table,
and extends the latter as necessary.
Parameters
----------
initialize_erfa : bool, or 'only', or 'empty'
Initialize the ERFA leap second table to its built-in value before
trying to expand it. This is generally not needed but can help
in case it somehow got corrupted. If equal to 'only', the ERFA
            table is reinitialized and no attempt is made to update it.
If 'empty', the leap second table is emptied before updating, i.e.,
it is overwritten altogether (note that this may break things in
surprising ways, as most leap second tables do not include pre-1970
pseudo leap-seconds; you were warned).
Returns
-------
n_update : int
Number of items updated.
Raises
------
ValueError
If the leap seconds in the table are not on 1st of January or July,
or if the matches are inconsistent. This would normally suggest
a corrupted leap second table, but might also indicate that the
ERFA table was corrupted. If needed, the ERFA table can be reset
by calling this method with an appropriate value for
``initialize_erfa``.
"""
if initialize_erfa == 'empty':
# Initialize to empty and update is the same as overwrite.
erfa.leap_seconds.set(self)
return len(self)
if initialize_erfa:
erfa.leap_seconds.set()
if initialize_erfa == 'only':
return 0
return erfa.leap_seconds.update(self)
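    # Usage sketch (assumes an up-to-date leap-second file can be found):
    #
    #     >>> table = LeapSeconds.auto_open()       # doctest: +SKIP
    #     >>> table.update_erfa_leap_seconds()      # doctest: +SKIP
    #     0
    #
    # A return value of 0 means ERFA already knew every listed leap second.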
|
b7ce4f787971738eead77361fcfa90446e8003e4e7d08b7718c430e8ed78f887 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import sys
import stat
import errno
import base64
import random
import shutil
import hashlib
import pathlib
import platform
import tempfile
import warnings
import itertools
import contextlib
import urllib.error
import urllib.parse
import urllib.request
from itertools import islice
from concurrent.futures import ThreadPoolExecutor
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
from astropy import units as _u # u is taken
from astropy.config import paths
import astropy.utils.data
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.data import (
CacheMissingWarning,
CacheDamaged,
conf,
_deltemps,
compute_hash,
download_file,
cache_contents,
_tempfilestodel,
get_cached_urls,
is_url_in_cache,
cache_total_size,
get_file_contents,
check_download_cache,
clear_download_cache,
get_pkg_data_fileobj,
get_readable_fileobj,
import_file_to_cache,
export_download_cache,
get_pkg_data_contents,
get_pkg_data_filename,
import_download_cache,
get_free_space_in_dir,
check_free_space_in_dir,
_get_download_cache_loc,
download_files_in_parallel,
is_url,
get_pkg_data_path
)
CI = os.environ.get('CI', False) == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
def can_rename_directory_in_use():
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "wt") as f:
f.write("some contents\n")
try:
with open(f1, "rt"):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
return pathlib.Path(path).resolve().as_uri()
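# For example (a sketch; the exact form is platform dependent):
# url_to('/tmp/x') -> 'file:///tmp/x' on POSIX systems.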
@pytest.fixture
def valid_urls(tmpdir):
def _valid_urls(tmpdir):
for i in itertools.count():
c = os.urandom(16).hex()
fn = os.path.join(tmpdir, "valid_" + str(i))
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmpdir)
@pytest.fixture
def invalid_urls(tmpdir):
def _invalid_urls(tmpdir):
for i in itertools.count():
fn = os.path.join(tmpdir, "invalid_" + str(i))
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmpdir)
@pytest.fixture
def temp_cache(tmpdir):
with paths.set_temp_cache(tmpdir):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
@pytest.fixture
def readonly_cache(tmpdir, valid_urls):
with TemporaryDirectory(dir=tmpdir) as d:
# other fixtures use the same tmpdir so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = set(u for u, c in islice(valid_urls, FEW))
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
@pytest.fixture
def fake_readonly_cache(tmpdir, valid_urls, monkeypatch):
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM,
"os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM,
"os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM,
"_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmpdir) as d:
# other fixtures use the same tmpdir so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = set(u for u, c in islice(valid_urls, FEW))
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(astropy.utils.data,
"_SafeTemporaryDirectory",
no_TemporaryDirectory)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
    long_url = "http://" + "a" * 256 + ".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
def test_case_collision(valid_urls, temp_cache):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmpdir):
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmpdir):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmpdir):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel([u for (u, c, c_bad) in urls],
cache=True,
sources=sources)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True),
[u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
for r, (u, c) in zip(r, urls):
assert get_file_contents(r) == c
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u),
[u for (u, c) in urls]))
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
for r, u in zip(r, urls):
if u in contents:
assert get_file_contents(r) == contents[u]
else:
assert r is None
def test_clear_download_cache(valid_urls):
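    # clear_download_cache should accept a URL, the filename returned by
    # download_file, or the hash of the file contents, and remove only the
    # corresponding entry.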
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(temp_cache, tmpdir):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=str(tmpdir), delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmpdir, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmpdir / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmpdir, temp_cache):
with TemporaryDirectory(dir=tmpdir) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert get_file_contents(download_file(f_url, cache=True)) == "new", \
"Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert get_file_contents(download_file(f_url, cache=True)) == "new", \
"Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {'cafile': None, 'capath': '/does/not/exist'}
msg = f'Verification of TLS/SSL certificate at {TESTURL_SSL} failed'
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(TESTURL_SSL, cache=False,
ssl_context=ssl_context, allow_insecure=True)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmpdir, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url+s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all([os.path.isfile(f) for f in fnout]), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmpdir, valid_urls, method):
urls = []
# tmpdir is shared between many tests, and that can cause weird
# interactions if we set the temporary cache too directly
with paths.set_temp_cache(tmpdir):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = set(u for (u, c) in urls)
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
for r, (u, c) in zip(r, td):
assert get_file_contents(r) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
@pytest.mark.slow
def test_download_parallel_partial_success_lock_safe(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmpdir):
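    # Three passes: fill the cache, confirm cache=True returns the stale
    # copies after the source files change, then confirm cache="update"
    # refetches the new contents.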
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = os.path.join(tmpdir, c)
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for (fn, u, c) in td:
c_plus = c + " updated"
fn = os.path.join(tmpdir, c)
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
("filename"), ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if ((not HAS_BZ2 and "bz2" in filename) or
(not HAS_LZMA and "xz" in filename)):
with pytest.raises(ValueError) as e:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
assert " format files are not supported" in str(e.value)
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmpdir):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmpdir.join(request.param)
filename = datafile.strpath
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write(contents, mode="wb")
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(ModuleNotFoundError,
match=r'does not provide the [lb]z[2m]a? module\.'):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmpdir):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmpdir.join("tmp.dat").strpath
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
    Test that the default behavior is correct when the cache directory
    can't be located.
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
    # make sure the _find_or_create_root_dir function fails as though the
    # astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, '_find_or_create_root_dir', osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname='astropy')
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
partial_warn_msgs = ['remote data cache could not be accessed', 'temporary file']
if n_warns == 4:
partial_warn_msgs.extend(['socket', 'socket'])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert len(partial_warn_msgs) == 0, f'Got some unexpected warnings: {partial_warn_msgs}'
assert n_warns in (2, 4), f'Expected 2 or 4 warnings, got {n_warns}'
assert os.path.isfile(fnout)
    # clearing the cache should be a no-op that doesn't affect fnout
with pytest.warns(CacheMissingWarning,
match=r".*Not clearing data cache - cache inaccessible.*"):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# now remove it so tests don't clutter up the temp dir this should get
# called at exit, anyway, but we do it here just to make sure it's working
# correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
    # no warnings should be raised in fileobj because the cache is unnecessary
@pytest.mark.parametrize(
("filename"),
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0"
b"\xd7\x95\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If non-deterministic failure happens see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmpdir, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = os.path.join(tmpdir, "the.zip")
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmpdir, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = os.path.join(tmpdir, "the.zip")
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmpdir, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = os.path.join(tmpdir, "the.zip")
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmpdir, temp_cache, valid_urls):
zip_file_name = os.path.join(tmpdir, "the.zip")
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmpdir):
fn = tmpdir / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding='binary') == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding='binary') != c
def test_export_import_roundtrip_different_location(tmpdir, valid_urls):
original_cache = tmpdir / "original"
os.mkdir(original_cache)
zip_file_name = tmpdir / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = set(u for (u, c) in urls)
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmpdir / "new"
os.mkdir(new_cache)
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for (u, c) in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for (u, c, h) in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize('desired_size',
[1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmpdir, desired_size):
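    # No filesystem should have an exabyte free, so the check must raise.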
with pytest.raises(OSError):
check_free_space_in_dir(str(tmpdir), desired_size)
def test_get_free_space_file_directory(tmpdir):
fn = tmpdir / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(str(fn))
free_space = get_free_space_in_dir(str(tmpdir))
assert free_space > 0 and not hasattr(free_space, 'unit')
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(str(tmpdir), unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(str(tmpdir), unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmpdir):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmpdir))
def test_download_file_schedules_deletion(valid_urls):
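    # Un-cached downloads go to temporary files that are registered for
    # deletion at interpreter exit.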
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmpdir):
fn = os.path.abspath(os.path.join(tmpdir, "file"))
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "wt") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "wt") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "wt") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "wt") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "wt") as f:
f.write("awkwardly-named bogus file that exists")
u2, c2 = next(valid_urls)
    # Fetch a second URL, then damage its entry by deleting the contents file
    f2 = download_file(u2, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == set([bf1, bf2, bf3, bf4])
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmpdir, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
    in their directory tree, and because the cache directory is actually
    several tree levels down from the directory set in the config file, it's
    important to check what happens if each of the steps in the path is wrong.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = str(tmpdir / "file")
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = str(tmpdir / "astropy")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = str(tmpdir / "astropy" / "download")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = str(tmpdir / "astropy" / "download" / "url")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn, "r") as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmpdir, valid_urls):
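    # A cache directory copied wholesale to a new location should keep
    # working, since entries are looked up by URL rather than by the
    # absolute path of the cache.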
u, c = next(valid_urls)
d1 = tmpdir / "1"
d2 = tmpdir / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = tmpdir.listdir()
# Assert that the temporary file was empty after get_readable_fileobj()
# context manager finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert f.read().rstrip() == (
"This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
def test_download_file_wrong_size(monkeypatch):
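    # Replace the urlopener with a mock whose Content-Length header can
    # disagree with the number of bytes actually served.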
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type('MockOpener', (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmpdir):
try:
with readonly_dir(tmpdir):
assert is_dir_readonly(tmpdir)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmpdir):
fn = tmpdir / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmpdir):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmpdir):
filename = os.path.join(tmpdir, "test-file")
content = "Some text or other"
url = "http://example.com/"
with open(filename, "wt") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmpdir):
filename = os.path.join(tmpdir, "test-file")
content = "Some text or other"
url = "http://example.com/"
with open(filename, "wt") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
def test_pkgname_isolation(temp_cache, valid_urls):
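    # Each pkgname gets its own cache; operations with one pkgname must
    # never see or delete entries belonging to another.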
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW+1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW+1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW+1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW+1
assert len(get_cached_urls(pkgname=a)) == FEW-1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW-1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
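    # Export/import should be able to copy a cache between pkgnames: same
    # URLs and contents, but physically separate files.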
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)],
pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.")
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.")
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache='update', sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform is able to remove files while in use.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform is able to remove files while in use.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache='update', sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp('allow_internet', False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), 'url'))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f)+'/')
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.all" # noqa
download_file(url)
@pytest.mark.parametrize('base', ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
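    # A trailing slash on an otherwise-bare URL should be normalized away,
    # so both spellings refer to the same cache entry.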
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
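    # An essentially empty URL should be downloadable via sources without
    # leaving a cache entry under the normalized form 'file:///'.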
u, c = next(valid_urls)
download_file('file://', cache=True, sources=[u])
assert not is_url_in_cache('file:///')
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = ['Name or service not known',
'nodename nor servname provided, or not known',
'getaddrinfo failed',
'Temporary failure in name resolution',
'No address associated with hostname']
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
('s', 'ans'),
[('http://googlecom', True),
('https://google.com', True),
('ftp://google.com', True),
('sftp://google.com', True),
('ssh://google.com', True),
('file:///c:/path/to/the%20file.txt', True),
('google.com', False),
('C:\\\\path\\\\file.docx', False),
('data://file', False)])
def test_string_is_url_check(s, ans):
assert is_url(s) is ans

# Licensed under a 3-clause BSD style license - see LICENSE.rst
import concurrent.futures
import inspect
import pickle
import pytest
from astropy.utils.decorators import (deprecated_attribute, deprecated,
sharedmethod, classproperty, lazyproperty,
format_doc, deprecated_renamed_argument)
from astropy.utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning,
AstropyUserWarning)
class NewDeprecationWarning(AstropyDeprecationWarning):
"""
New Warning subclass to be used to test the deprecated decorator's
``warning_type`` parameter.
"""
def test_deprecated_attribute():
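    # Exercise deprecated_attribute with each of its options: default
    # message, custom warning class, alternative, explicit message, and
    # pending deprecation. Accessing the private attribute directly (via
    # set_private) must not warn.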
class DummyClass:
def __init__(self):
self._foo = 42
self._bar = 4242
self._message = '42'
self._alternative = [42]
self._pending = {42}
def set_private(self):
self._foo = 100
self._bar = 1000
self._message = '100'
self._alternative = [100]
self._pending = {100}
foo = deprecated_attribute('foo', '0.2')
bar = deprecated_attribute('bar', '0.2',
warning_type=NewDeprecationWarning)
alternative = deprecated_attribute('alternative', '0.2',
alternative='other')
message = deprecated_attribute('message', '0.2', message='MSG')
pending = deprecated_attribute('pending', '0.2', pending=True)
dummy = DummyClass()
with pytest.warns(AstropyDeprecationWarning, match="The foo attribute is "
"deprecated and may be removed in a future version.") as w:
dummy.foo
assert len(w) == 1
with pytest.warns(NewDeprecationWarning, match="The bar attribute is "
"deprecated and may be removed in a future version.") as w:
dummy.bar
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning, match="MSG"):
dummy.message
with pytest.warns(AstropyDeprecationWarning, match=r"Use other instead\."):
dummy.alternative
with pytest.warns(AstropyPendingDeprecationWarning):
dummy.pending
dummy.set_private()
# This needs to be defined outside of the test function, because we
# want to try to pickle it.
@deprecated('100.0')
class TA:
"""
This is the class docstring.
"""
def __init__(self):
"""
This is the __init__ docstring
"""
pass
class TMeta(type):
metaclass_attr = 1
@deprecated('100.0')
class TB(metaclass=TMeta):
pass
@deprecated('100.0', warning_type=NewDeprecationWarning)
class TC:
"""
This class has the custom warning.
"""
pass
def test_deprecated_class():
orig_A = TA.__bases__[0]
    # The only things that should be different about the new class are
    # __doc__, __init__, __bases__, __dict__ and __subclasshook__
    # (plus __init_subclass__ on Python 3.6+).
for x in dir(orig_A):
if x not in ('__doc__', '__init__', '__bases__', '__dict__',
'__subclasshook__', '__init_subclass__'):
assert getattr(TA, x) == getattr(orig_A, x)
with pytest.warns(AstropyDeprecationWarning) as w:
TA()
assert len(w) == 1
if TA.__doc__ is not None:
assert 'function' not in TA.__doc__
assert 'deprecated' in TA.__doc__
assert 'function' not in TA.__init__.__doc__
assert 'deprecated' in TA.__init__.__doc__
# Make sure the object is picklable
pickle.dumps(TA)
with pytest.warns(NewDeprecationWarning) as w:
TC()
assert len(w) == 1
def test_deprecated_class_with_new_method():
"""
Test that a class with __new__ method still works even if it accepts
additional arguments.
    This previously failed because the deprecated decorator would wrap
    ``object.__init__``, which takes no arguments.
"""
@deprecated('1.0')
class A:
def __new__(cls, a):
return super().__new__(cls)
# Creating an instance should work but raise a DeprecationWarning
with pytest.warns(AstropyDeprecationWarning) as w:
A(1)
assert len(w) == 1
@deprecated('1.0')
class B:
def __new__(cls, a):
return super().__new__(cls)
def __init__(self, a):
pass
# Creating an instance should work but raise a DeprecationWarning
with pytest.warns(AstropyDeprecationWarning) as w:
B(1)
assert len(w) == 1
def test_deprecated_class_with_super():
"""
Regression test for an issue where classes that used ``super()`` in their
``__init__`` did not actually call the correct class's ``__init__`` in the
MRO.
"""
@deprecated('100.0')
class TB:
def __init__(self, a, b):
super().__init__()
with pytest.warns(AstropyDeprecationWarning) as w:
TB(1, 2)
assert len(w) == 1
if TB.__doc__ is not None:
assert 'function' not in TB.__doc__
assert 'deprecated' in TB.__doc__
assert 'function' not in TB.__init__.__doc__
assert 'deprecated' in TB.__init__.__doc__
def test_deprecated_class_with_custom_metaclass():
"""
Regression test for an issue where deprecating a class with a metaclass
other than type did not restore the metaclass properly.
"""
with pytest.warns(AstropyDeprecationWarning) as w:
TB()
assert len(w) == 1
assert type(TB) is TMeta
assert TB.metaclass_attr == 1
def test_deprecated_static_and_classmethod():
"""
Regression test for issue introduced by
https://github.com/astropy/astropy/pull/2811 and mentioned also here:
https://github.com/astropy/astropy/pull/2580#issuecomment-51049969
where it appears that deprecated staticmethods didn't work on Python 2.6.
"""
class A:
"""Docstring"""
@deprecated('1.0')
@staticmethod
def B():
pass
@deprecated('1.0')
@classmethod
def C(cls):
pass
with pytest.warns(AstropyDeprecationWarning) as w:
A.B()
assert len(w) == 1
if A.__doc__ is not None:
assert 'deprecated' in A.B.__doc__
with pytest.warns(AstropyDeprecationWarning) as w:
A.C()
assert len(w) == 1
if A.__doc__ is not None:
assert 'deprecated' in A.C.__doc__
def test_deprecated_argument():
# Tests the decorator with function, method, staticmethod and classmethod.
class Test:
@classmethod
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test1(cls, overwrite):
return overwrite
@staticmethod
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test2(overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test3(self, overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3',
warning_type=NewDeprecationWarning)
def test4(self, overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=False)
def test1(overwrite):
return overwrite
for method in [Test().test1, Test().test2, Test().test3, Test().test4, test1]:
# As positional argument only
assert method(1) == 1
# As new keyword argument
assert method(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert method(clobber=1) == 1
assert len(w) == 1
assert 'test_decorators.py' in str(w[0].filename)
if method.__name__ == 'test4':
assert issubclass(w[0].category, NewDeprecationWarning)
# Using both. Both keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
method(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
method(1, clobber=2)
def test_deprecated_argument_custom_message():
@deprecated_renamed_argument('foo', 'bar', '4.0', message='Custom msg')
def test(bar=0):
pass
with pytest.warns(AstropyDeprecationWarning, match='Custom msg'):
test(foo=0)
def test_deprecated_argument_in_kwargs():
# To rename an argument that is consumed by "kwargs" the "arg_in_kwargs"
# parameter is used.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3',
arg_in_kwargs=True)
def test(**kwargs):
return kwargs['overwrite']
# As positional argument only
with pytest.raises(TypeError):
test(1)
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert test(clobber=1) == 1
assert len(w) == 1
assert 'test_decorators.py' in str(w[0].filename)
# Using both. Both keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
test(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
test(1, clobber=2)
def test_deprecated_argument_relaxed():
# Relax turns the TypeError if both old and new keyword are used into
# a warning.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert test(clobber=1) == 1
assert len(w) == 1
# Using both. Both keyword
with pytest.warns(AstropyUserWarning) as w:
assert test(clobber=2, overwrite=1) == 1
assert len(w) == 2
assert '"clobber" was deprecated' in str(w[0].message)
assert '"clobber" and "overwrite" keywords were set' in str(w[1].message)
# One positional, one keyword
with pytest.warns(AstropyUserWarning) as w:
assert test(1, clobber=2) == 1
assert len(w) == 2
assert '"clobber" was deprecated' in str(w[0].message)
assert '"clobber" and "overwrite" keywords were set' in str(w[1].message)
def test_deprecated_argument_pending():
    # With pending=True the deprecation is silent: no warning is emitted,
    # and using both the old and the new keyword does not raise a TypeError.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', pending=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
assert test(clobber=1) == 1
# Using both. Both keyword
assert test(clobber=2, overwrite=1) == 1
# One positional, one keyword
assert test(1, clobber=2) == 1
def test_deprecated_argument_multi_deprecation():
@deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'],
[1.3, 1.2, 1.3], relax=True)
def test(a, b, c):
return a, b, c
with pytest.warns(AstropyDeprecationWarning) as w:
assert test(x=1, y=2, z=3) == (1, 2, 3)
assert len(w) == 3
# Make sure relax is valid for all arguments
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
assert len(w) == 4
def test_deprecated_argument_multi_deprecation_2():
@deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'],
[1.3, 1.2, 1.3], relax=[True, True, False])
def test(a, b, c):
return a, b, c
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 4
with pytest.raises(TypeError), pytest.warns(AstropyUserWarning):
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
def test_deprecated_argument_not_allowed_use():
# If the argument is supposed to be inside the kwargs one needs to set the
    # arg_in_kwargs parameter. Without it, a TypeError is raised.
with pytest.raises(TypeError):
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test1(**kwargs):
return kwargs['overwrite']
# Cannot replace "*args".
with pytest.raises(TypeError):
@deprecated_renamed_argument('overwrite', 'args', '1.3')
def test2(*args):
return args
# Cannot replace "**kwargs".
with pytest.raises(TypeError):
@deprecated_renamed_argument('overwrite', 'kwargs', '1.3')
def test3(**kwargs):
return kwargs
def test_deprecated_argument_remove():
@deprecated_renamed_argument('x', None, '2.0', alternative='astropy.y')
def test(dummy=11, x=3):
return dummy, x
with pytest.warns(AstropyDeprecationWarning, match=r"Use astropy\.y instead") as w:
assert test(x=1) == (11, 1)
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning) as w:
assert test(x=1, dummy=10) == (10, 1)
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning, match=r'Use astropy\.y instead'):
        assert test(121, 1) == (121, 1)
assert test() == (11, 3)
assert test(121) == (121, 3)
assert test(dummy=121) == (121, 3)
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A:
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5
def test_classproperty_docstring():
"""
Tests that the docstring is set correctly on classproperties.
This failed previously due to a bug in Python that didn't always
set __doc__ properly on instances of property subclasses.
"""
class A:
# Inherits docstring from getter
@classproperty
def foo(cls):
"""The foo."""
return 1
assert A.__dict__['foo'].__doc__ == "The foo."
class B:
# Use doc passed to classproperty constructor
def _get_foo(cls): return 1
foo = classproperty(_get_foo, doc="The foo.")
assert B.__dict__['foo'].__doc__ == "The foo."
@pytest.mark.slow
def test_classproperty_lazy_threadsafe(fast_thread_switching):
"""
Test that a class property with lazy=True is thread-safe.
"""
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
# This is testing for race conditions, so try many times in the
# hope that we'll get the timing right.
for p in range(10000):
class A:
@classproperty(lazy=True)
def foo(cls):
nonlocal calls
calls += 1
return object()
# Have all worker threads query in parallel
calls = 0
futures = [executor.submit(lambda: A.foo) for i in range(workers)]
# Check that only one call happened and they all received it
values = [future.result() for future in futures]
assert calls == 1
assert values[0] is not None
assert values == [values[0]] * workers
@pytest.mark.slow
def test_lazyproperty_threadsafe(fast_thread_switching):
"""
Test thread safety of lazyproperty.
"""
# This test is generally similar to test_classproperty_lazy_threadsafe
# above. See there for comments.
class A:
def __init__(self):
self.calls = 0
@lazyproperty
def foo(self):
self.calls += 1
return object()
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
a = A()
futures = [executor.submit(lambda: a.foo) for i in range(workers)]
values = [future.result() for future in futures]
assert a.calls == 1
assert a.foo is not None
assert values == [a.foo] * workers
def test_format_doc_stringInput_simple():
# Simple tests with string input
docstring_fail = ''
    # Raises a ValueError if the input is empty
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
docstring = 'test'
# A first test that replaces an empty docstring
@format_doc(docstring)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == docstring
# Test that it replaces an existing docstring
@format_doc(docstring)
def testfunc_2():
'''not test'''
pass
assert inspect.getdoc(testfunc_2) == docstring
def test_format_doc_stringInput_format():
# Tests with string input and formatting
docstring = 'yes {0} no {opt}'
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc1():
pass
# Test that the formatting is done right
@format_doc(docstring, '/', opt='= life')
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == 'yes / no = life'
# Test that we can include the original docstring
docstring2 = 'yes {0} no {__doc__}'
@format_doc(docstring2, '/')
def testfunc3():
'''= 2 / 2 * life'''
pass
assert inspect.getdoc(testfunc3) == 'yes / no = 2 / 2 * life'
def test_format_doc_objectInput_simple():
# Simple tests with object input
def docstring_fail():
pass
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
def docstring0():
'''test'''
pass
# A first test that replaces an empty docstring
@format_doc(docstring0)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == inspect.getdoc(docstring0)
# Test that it replaces an existing docstring
@format_doc(docstring0)
def testfunc_2():
'''not test'''
pass
assert inspect.getdoc(testfunc_2) == inspect.getdoc(docstring0)
def test_format_doc_objectInput_format():
# Tests with object input and formatting
def docstring():
'''test {0} test {opt}'''
pass
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc_fail():
pass
# Test that the formatting is done right
@format_doc(docstring, '+', opt='= 2 * test')
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == 'test + test = 2 * test'
# Test that we can include the original docstring
def docstring2():
'''test {0} test {__doc__}'''
pass
@format_doc(docstring2, '+')
def testfunc3():
'''= 4 / 2 * test'''
pass
assert inspect.getdoc(testfunc3) == 'test + test = 4 / 2 * test'
def test_format_doc_selfInput_simple():
# Simple tests with self input
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(None)
def testfunc_fail():
pass
# Test that it keeps an existing docstring
@format_doc(None)
def testfunc_1():
'''not test'''
pass
assert inspect.getdoc(testfunc_1) == 'not test'
def test_format_doc_selfInput_format():
# Tests with string input which is '__doc__' (special case) and formatting
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(None)
def testfunc_fail():
'''dum {0} dum {opt}'''
pass
# Test that the formatting is done right
@format_doc(None, 'di', opt='da dum')
def testfunc1():
'''dum {0} dum {opt}'''
pass
assert inspect.getdoc(testfunc1) == 'dum di dum da dum'
# Test that we cannot recursively insert the original documentation
@format_doc(None, 'di')
def testfunc2():
'''dum {0} dum {__doc__}'''
pass
assert inspect.getdoc(testfunc2) == 'dum di dum '
def test_format_doc_onMethod():
# Check if the decorator works on methods too, to spice it up we try double
# decorator
docstring = 'what we do {__doc__}'
class TestClass:
@format_doc(docstring)
@format_doc(None, 'strange.')
def test_method(self):
'''is {0}'''
pass
assert inspect.getdoc(TestClass.test_method) == 'what we do is strange.'
def test_format_doc_onClass():
# Check if the decorator works on classes too
docstring = 'what we do {__doc__} {0}{opt}'
@format_doc(docstring, 'strange', opt='.')
class TestClass:
'''is'''
pass
assert inspect.getdoc(TestClass) == 'what we do is strange.'
|
ff0a37f58dcf1ef448612ce96108e839b782de90f572a1426977e207157e0824 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
Generally, any new predefined class should override the
``from_unmasked(data, mask, copy=False)`` class method that
creates an instance from unmasked data and a mask, as well as
the ``unmasked`` property that returns just the data.
The `Masked` class itself provides a base ``mask`` property,
which can also be overridden if needed.
"""
import builtins
import numpy as np
from astropy.utils.shapes import NDArrayShapeMethods
from astropy.utils.data_info import ParentDtypeInfo
from .function_helpers import (MASKED_SAFE_FUNCTIONS,
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
__all__ = ['Masked', 'MaskedNDArray']
get__doc__ = """Masked version of {0.__name__}.
Except for the ability to pass in a ``mask``, parameters are
as for `{0.__module__}.{0.__name__}`.
""".format
class Masked(NDArrayShapeMethods):
"""A scalar value or array of values with associated mask.
The resulting instance will take its exact type from whatever the
contents are, with the type generated on the fly as needed.
Parameters
----------
data : array-like
        The data for which a mask is to be added. The result will be
a subclass of the type of ``data``.
mask : array-like of bool, optional
The initial mask to assign. If not given, taken from the data.
copy : bool
Whether the data and mask should be copied. Default: `False`.
"""
_base_classes = {}
"""Explicitly defined masked classes keyed by their unmasked counterparts.
For subclasses of these unmasked classes, masked counterparts can be generated.
"""
_masked_classes = {}
"""Masked classes keyed by their unmasked data counterparts."""
def __new__(cls, *args, **kwargs):
if cls is Masked:
# Initializing with Masked itself means we're in "factory mode".
if not kwargs and len(args) == 1 and isinstance(args[0], type):
# Create a new masked class.
return cls._get_masked_cls(args[0])
else:
return cls._get_masked_instance(*args, **kwargs)
else:
# Otherwise we're a subclass and should just pass information on.
return super().__new__(cls, *args, **kwargs)
def __init_subclass__(cls, base_cls=None, data_cls=None, **kwargs):
"""Register a Masked subclass.
Parameters
----------
base_cls : type, optional
If given, it is taken to mean that ``cls`` can be used as
a base for masked versions of all subclasses of ``base_cls``,
so it is registered as such in ``_base_classes``.
data_cls : type, optional
            If given, ``cls`` will be registered as the masked version of
``data_cls``. Will set the private ``cls._data_cls`` attribute,
and auto-generate a docstring if not present already.
**kwargs
Passed on for possible further initialization by superclasses.
"""
if base_cls is not None:
Masked._base_classes[base_cls] = cls
if data_cls is not None:
cls._data_cls = data_cls
cls._masked_classes[data_cls] = cls
if cls.__doc__ is None:
cls.__doc__ = get__doc__(data_cls)
super().__init_subclass__(**kwargs)
# This base implementation just uses the class initializer.
# Subclasses can override this in case the class does not work
# with this signature, or to provide a faster implementation.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
"""Create an instance from unmasked data and a mask."""
return cls(data, mask=mask, copy=copy)
@classmethod
def _get_masked_instance(cls, data, mask=None, copy=False):
data, data_mask = cls._get_data_and_mask(data)
if mask is None:
mask = False if data_mask is None else data_mask
masked_cls = cls._get_masked_cls(data.__class__)
return masked_cls.from_unmasked(data, mask, copy)
@classmethod
def _get_masked_cls(cls, data_cls):
"""Get the masked wrapper for a given data class.
If the data class does not exist yet but is a subclass of any of the
registered base data classes, it is automatically generated
(except we skip `~numpy.ma.MaskedArray` subclasses, since then the
masking mechanisms would interfere).
"""
if issubclass(data_cls, (Masked, np.ma.MaskedArray)):
return data_cls
masked_cls = cls._masked_classes.get(data_cls)
if masked_cls is None:
# Walk through MRO and find closest base data class.
# Note: right now, will basically always be ndarray, but
# one could imagine needing some special care for one subclass,
# which would then get its own entry. E.g., if MaskedAngle
# defined something special, then MaskedLongitude should depend
# on it.
for mro_item in data_cls.__mro__:
base_cls = cls._base_classes.get(mro_item)
if base_cls is not None:
break
else:
# Just hope that MaskedNDArray can handle it.
# TODO: this covers the case where a user puts in a list or so,
# but for those one could just explicitly do something like
# _masked_classes[list] = MaskedNDArray.
return MaskedNDArray
# Create (and therefore register) new Masked subclass for the
# given data_cls.
masked_cls = type('Masked' + data_cls.__name__,
(data_cls, base_cls), {}, data_cls=data_cls)
return masked_cls
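    # Sketch of the auto-generation above for a hypothetical ndarray
    # subclass (``MyArray`` is not part of this module):
    #
    #     >>> class MyArray(np.ndarray): pass
    #     >>> Masked(MyArray).__name__
    #     'MaskedMyArray'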
@classmethod
def _get_data_and_mask(cls, data, allow_ma_masked=False):
"""Split data into unmasked and mask, if present.
Parameters
----------
data : array-like
Possibly masked item, judged by whether it has a ``mask`` attribute.
If so, checks for being an instance of `~astropy.utils.masked.Masked`
or `~numpy.ma.MaskedArray`, and gets unmasked data appropriately.
allow_ma_masked : bool, optional
Whether or not to process `~numpy.ma.masked`, i.e., an item that
implies no data but the presence of a mask.
Returns
-------
unmasked, mask : array-like
Unmasked will be `None` for `~numpy.ma.masked`.
Raises
------
ValueError
If `~numpy.ma.masked` is passed in and ``allow_ma_masked`` is not set.
"""
mask = getattr(data, 'mask', None)
if mask is not None:
try:
data = data.unmasked
except AttributeError:
if not isinstance(data, np.ma.MaskedArray):
raise
if data is np.ma.masked:
if allow_ma_masked:
data = None
else:
raise ValueError('cannot handle np.ma.masked here.') from None
else:
data = data.data
return data, mask
@classmethod
def _get_data_and_masks(cls, *args):
data_masks = [cls._get_data_and_mask(arg) for arg in args]
return (tuple(data for data, _ in data_masks),
tuple(mask for _, mask in data_masks))
def _get_mask(self):
"""The mask.
        If set, replaces the original mask with whatever it is set to,
        using a view if no broadcasting or type conversion is required.
"""
return self._mask
def _set_mask(self, mask, copy=False):
self_dtype = getattr(self, 'dtype', None)
mask_dtype = (np.ma.make_mask_descr(self_dtype)
if self_dtype and self_dtype.names else np.dtype('?'))
ma = np.asanyarray(mask, dtype=mask_dtype)
if ma.shape != self.shape:
# This will fail (correctly) if not broadcastable.
self._mask = np.empty(self.shape, dtype=mask_dtype)
self._mask[...] = ma
elif ma is mask:
# Even if not copying use a view so that shape setting
# does not propagate.
self._mask = mask.copy() if copy else mask.view()
else:
self._mask = ma
mask = property(_get_mask, _set_mask)
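    # Sketch of the setter semantics defined above:
    #
    #     >>> ma = Masked(np.arange(3.))
    #     >>> ma.mask = True           # a scalar is broadcast to full shape
    #     >>> ma.mask
    #     array([ True,  True,  True])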
# Note: subclass should generally override the unmasked property.
# This one assumes the unmasked data is stored in a private attribute.
@property
def unmasked(self):
"""The unmasked values.
See Also
--------
astropy.utils.masked.Masked.filled
"""
return self._unmasked
def filled(self, fill_value):
"""Get a copy of the underlying data, with masked values filled in.
Parameters
----------
fill_value : object
Value to replace masked values with.
See Also
--------
astropy.utils.masked.Masked.unmasked
"""
unmasked = self.unmasked.copy()
if self.mask.dtype.names:
np.ma.core._recursive_filled(unmasked, self.mask, fill_value)
else:
unmasked[self.mask] = fill_value
return unmasked
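    # E.g. (a sketch):
    #
    #     >>> ma = Masked(np.array([1., 2., 3.]), mask=[False, True, False])
    #     >>> ma.filled(0.)
    #     array([1., 0., 3.])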
def _apply(self, method, *args, **kwargs):
# Required method for NDArrayShapeMethods, to help provide __getitem__
# and shape-changing methods.
if callable(method):
data = method(self.unmasked, *args, **kwargs)
mask = method(self.mask, *args, **kwargs)
else:
data = getattr(self.unmasked, method)(*args, **kwargs)
mask = getattr(self.mask, method)(*args, **kwargs)
result = self.from_unmasked(data, mask, copy=False)
if 'info' in self.__dict__:
result.info = self.info
return result
def __setitem__(self, item, value):
value, mask = self._get_data_and_mask(value, allow_ma_masked=True)
if value is not None:
self.unmasked[item] = value
self.mask[item] = mask
class MaskedInfoBase:
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {'fits': 'null_value',
'ecsv': 'null_value',
'hdf5': 'data_mask',
'parquet': 'data_mask',
None: 'null_value'}
class MaskedNDArrayInfo(MaskedInfoBase, ParentDtypeInfo):
"""
Container for meta information like name, description, format.
"""
# Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. This is the same as for MaskedColumn.
attr_names = ParentDtypeInfo.attr_names | {'serialize_method'}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = 'data'
def _represent_as_dict(self):
out = super()._represent_as_dict()
masked_array = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == 'data_mask':
out['data'] = masked_array.unmasked
if np.any(masked_array.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out['mask'] = masked_array.mask
elif method == 'null_value':
out['data'] = np.ma.MaskedArray(masked_array.unmasked,
mask=masked_array.mask)
else:
raise ValueError('serialize method must be either "data_mask" or "null_value"')
return out
def _construct_from_dict(self, map):
# Override usual handling, since MaskedNDArray takes shape and buffer
# as input, which is less useful here.
# The map can contain either a MaskedColumn or a Column and a mask.
# Extract the mask for the former case.
map.setdefault('mask', getattr(map['data'], 'mask', False))
return self._parent_cls.from_unmasked(**map)
class MaskedArraySubclassInfo(MaskedInfoBase):
"""Mixin class to create a subclasses such as MaskedQuantityInfo."""
# This is used below in __init_subclass__, which also inserts a
# 'serialize_method' attribute in attr_names.
def _represent_as_dict(self):
# Use the data_cls as the class name for serialization,
# so that we do not have to store all possible masked classes
# in astropy.table.serialize.__construct_mixin_classes.
out = super()._represent_as_dict()
data_cls = self._parent._data_cls
out.setdefault('__class__',
data_cls.__module__ + '.' + data_cls.__name__)
return out
def _comparison_method(op):
"""
Create a comparison operator for MaskedNDArray.
Needed since for string dtypes the base operators bypass __array_ufunc__
and hence return unmasked results.
"""
def _compare(self, other):
other_data, other_mask = self._get_data_and_mask(other)
result = getattr(self.unmasked, op)(other_data)
if result is NotImplemented:
return NotImplemented
mask = self.mask | (other_mask if other_mask is not None else False)
return self._masked_result(result, mask, None)
return _compare
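# E.g. (a sketch), for a string array the mask is kept, where a plain
# ndarray comparison would drop it:
#
#     >>> s = Masked(np.array(['a', 'b']), mask=[False, True])
#     >>> (s == 'a').mask
#     array([False,  True])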
class MaskedIterator:
"""
Flat iterator object to iterate over Masked Arrays.
A `~astropy.utils.masked.MaskedIterator` iterator is returned by ``m.flat``
for any masked array ``m``. It allows iterating over the array as if it
were a 1-D array, either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
Notes
-----
The design of `~astropy.utils.masked.MaskedIterator` follows that of
`~numpy.ma.core.MaskedIterator`. It is not exported by the
`~astropy.utils.masked` module. Instead of instantiating directly,
use the ``flat`` method in the masked array instance.
"""
def __init__(self, m):
self._masked = m
self._dataiter = m.unmasked.flat
self._maskiter = m.mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
mask = self._maskiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Masked array.
if not isinstance(out, np.ndarray):
out = out[...]
mask = mask[...]
return self._masked.from_unmasked(out, mask, copy=False)
def __setitem__(self, index, value):
data, mask = self._masked._get_data_and_mask(value, allow_ma_masked=True)
if data is not None:
self._dataiter[index] = data
self._maskiter[index] = mask
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)[...]
mask = next(self._maskiter)[...]
return self._masked.from_unmasked(out, mask, copy=False)
next = __next__
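# A short iteration sketch:
#
#     >>> m = Masked(np.arange(4.).reshape(2, 2),
#     ...            mask=[[False, True], [False, False]])
#     >>> [bool(el.mask) for el in m.flat]
#     [False, True, False, False]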
class MaskedNDArray(Masked, np.ndarray, base_cls=np.ndarray, data_cls=np.ndarray):
_mask = None
info = MaskedNDArrayInfo()
def __new__(cls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
self = super().__new__(cls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
# For all subclasses we should set a default __new__ that passes on
# arguments other than mask to the data class, and then sets the mask.
if '__new__' not in cls.__dict__:
def __new__(newcls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
# Need to explicitly mention classes outside of class definition.
self = super(cls, newcls).__new__(newcls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
cls.__new__ = __new__
if 'info' not in cls.__dict__ and hasattr(cls._data_cls, 'info'):
data_info = cls._data_cls.info
attr_names = data_info.attr_names | {'serialize_method'}
new_info = type(cls.__name__+'Info',
(MaskedArraySubclassInfo, data_info.__class__),
dict(attr_names=attr_names))
cls.info = new_info()
# The two pieces typically overridden.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
# Note: have to override since __new__ would use ndarray.__new__
# which expects the shape as its first argument, not an array.
data = np.array(data, subok=True, copy=copy)
self = data.view(cls)
self._set_mask(mask, copy=copy)
return self
@property
def unmasked(self):
return super().view(self._data_cls)
@classmethod
def _get_masked_cls(cls, data_cls):
# Short-cuts
if data_cls is np.ndarray:
return MaskedNDArray
elif data_cls is None: # for .view()
return cls
return super()._get_masked_cls(data_cls)
@property
def flat(self):
"""A 1-D iterator over the Masked array.
This returns a ``MaskedIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to Python's built-in iterator, except that it also
allows assignment.
"""
return MaskedIterator(self)
@property
def _baseclass(self):
"""Work-around for MaskedArray initialization.
Allows the base class to be inferred correctly when a masked instance
is used to initialize (or viewed as) a `~numpy.ma.MaskedArray`.
"""
return self._data_cls
def view(self, dtype=None, type=None):
"""New view of the masked array.
Like `numpy.ndarray.view`, but always returning a masked array subclass.
"""
if type is None and (isinstance(dtype, builtins.type)
and issubclass(dtype, np.ndarray)):
return super().view(self._get_masked_cls(dtype))
if dtype is None:
return super().view(self._get_masked_cls(type))
dtype = np.dtype(dtype)
if not (dtype.itemsize == self.dtype.itemsize
and (dtype.names is None
or len(dtype.names) == len(self.dtype.names))):
raise NotImplementedError(
f"{self.__class__} cannot be viewed with a dtype with a "
f"with a different number of fields or size.")
return super().view(dtype, self._get_masked_cls(type))
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Logically, this should come from ndarray and hence be None, but
# just in case someone creates a new mixin, we check.
super_array_finalize = super().__array_finalize__
if super_array_finalize: # pragma: no cover
super_array_finalize(obj)
if self._mask is None:
# Got here after, e.g., a view of another masked class.
# Get its mask, or initialize ours.
self._set_mask(getattr(obj, '_mask', False))
if 'info' in obj.__dict__:
self.info = obj.info
@property
def shape(self):
"""The shape of the data and the mask.
Usually used to get the current shape of an array, but may also be
used to reshape the array in-place by assigning a tuple of array
dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the
size of the array and the remaining dimensions.
Raises
------
AttributeError
If a copy is required, of either the data or the mask.
"""
# Redefinition to allow defining a setter and add a docstring.
return super().shape
@shape.setter
def shape(self, shape):
old_shape = self.shape
self._mask.shape = shape
# Reshape array proper in try/except just in case some broadcasting
# or so causes it to fail.
try:
super(MaskedNDArray, type(self)).shape.__set__(self, shape)
except Exception as exc:
self._mask.shape = old_shape
# Given that the mask reshaping succeeded, the only logical
# reason for an exception is something like a broadcast error in
            # __array_finalize__, or a different memory ordering between
# mask and data. For those, give a more useful error message;
# otherwise just raise the error.
if 'could not broadcast' in exc.args[0]:
raise AttributeError(
'Incompatible shape for in-place modification. '
'Use `.reshape()` to make a copy with the desired '
'shape.') from None
else: # pragma: no cover
raise
_eq_simple = _comparison_method('__eq__')
_ne_simple = _comparison_method('__ne__')
__lt__ = _comparison_method('__lt__')
__le__ = _comparison_method('__le__')
__gt__ = _comparison_method('__gt__')
__ge__ = _comparison_method('__ge__')
def __eq__(self, other):
if not self.dtype.names:
return self._eq_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack([self[field] == other[field]
for field in self.dtype.names], axis=-1)
return result.all(axis=-1)
def __ne__(self, other):
if not self.dtype.names:
return self._ne_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack([self[field] != other[field]
for field in self.dtype.names], axis=-1)
return result.any(axis=-1)
def _combine_masks(self, masks, out=None):
masks = [m for m in masks if m is not None and m is not False]
if not masks:
return False
if len(masks) == 1:
if out is None:
return masks[0].copy()
else:
np.copyto(out, masks[0])
return out
out = np.logical_or(masks[0], masks[1], out=out)
for mask in masks[2:]:
np.logical_or(out, mask, out=out)
return out
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.pop('out', None)
out_unmasked = None
out_mask = None
if out is not None:
out_unmasked, out_masks = self._get_data_and_masks(*out)
for d, m in zip(out_unmasked, out_masks):
if m is None:
# TODO: allow writing to unmasked output if nothing is masked?
if d is not None:
raise TypeError('cannot write to unmasked output')
elif out_mask is None:
out_mask = m
unmasked, masks = self._get_data_and_masks(*inputs)
if ufunc.signature:
# We're dealing with a gufunc. For now, only deal with
# np.matmul and gufuncs for which the mask of any output always
# depends on all core dimension values of all inputs.
# Also ignore axes keyword for now...
# TODO: in principle, it should be possible to generate the mask
# purely based on the signature.
if 'axes' in kwargs:
raise NotImplementedError("Masked does not yet support gufunc "
"calls with 'axes'.")
if ufunc is np.matmul:
# np.matmul is tricky and its signature cannot be parsed by
# _parse_gufunc_signature.
unmasked = np.atleast_1d(*unmasked)
mask0, mask1 = masks
masks = []
is_mat1 = unmasked[1].ndim >= 2
if mask0 is not None:
masks.append(
np.logical_or.reduce(mask0, axis=-1, keepdims=is_mat1))
if mask1 is not None:
masks.append(
np.logical_or.reduce(mask1, axis=-2, keepdims=True)
if is_mat1 else
np.logical_or.reduce(mask1))
mask = self._combine_masks(masks, out=out_mask)
else:
# Parse signature with private numpy function. Note it
# cannot handle spaces in tuples, so remove those.
in_sig, out_sig = np.lib.function_base._parse_gufunc_signature(
ufunc.signature.replace(' ', ''))
axis = kwargs.get('axis', -1)
keepdims = kwargs.get('keepdims', False)
in_masks = []
for sig, mask in zip(in_sig, masks):
if mask is not None:
if sig:
# Input has core dimensions. Assume that if any
# value in those is masked, the output will be
# masked too (TODO: for multiple core dimensions
# this may be too strong).
mask = np.logical_or.reduce(
mask, axis=axis, keepdims=keepdims)
in_masks.append(mask)
mask = self._combine_masks(in_masks)
result_masks = []
for os in out_sig:
if os:
# Output has core dimensions. Assume all those
# get the same mask.
result_mask = np.expand_dims(mask, axis)
else:
result_mask = mask
result_masks.append(result_mask)
mask = result_masks if len(result_masks) > 1 else result_masks[0]
elif method == '__call__':
# Regular ufunc call.
mask = self._combine_masks(masks, out=out_mask)
elif method == 'outer':
# Must have two arguments; adjust masks as will be done for data.
assert len(masks) == 2
masks = [(m if m is not None else False) for m in masks]
mask = np.logical_or.outer(masks[0], masks[1], out=out_mask)
elif method in {'reduce', 'accumulate'}:
# Reductions like np.add.reduce (sum).
if masks[0] is not None:
# By default, we simply propagate masks, since for
# things like np.sum, it makes no sense to do otherwise.
# Individual methods need to override as needed.
# TODO: take care of 'out' too?
if method == 'reduce':
axis = kwargs.get('axis', None)
keepdims = kwargs.get('keepdims', False)
where = kwargs.get('where', True)
mask = np.logical_or.reduce(masks[0], where=where,
axis=axis, keepdims=keepdims,
out=out_mask)
if where is not True:
# Mask also whole rows that were not selected by where,
# so would have been left as unmasked above.
mask |= np.logical_and.reduce(masks[0], where=where,
axis=axis, keepdims=keepdims)
else:
# Accumulate
axis = kwargs.get('axis', 0)
mask = np.logical_or.accumulate(masks[0], axis=axis,
out=out_mask)
elif out is not None:
mask = False
else: # pragma: no cover
# Can only get here if neither input nor output was masked, but
# perhaps axis or where was masked (in numpy < 1.21 this is
# possible). We don't support this.
return NotImplemented
elif method in {'reduceat', 'at'}: # pragma: no cover
            # TODO: implement things like np.add.reduceat.
raise NotImplementedError("masked instances cannot yet deal with "
"'reduceat' or 'at'.")
if out_unmasked is not None:
kwargs['out'] = out_unmasked
result = getattr(ufunc, method)(*unmasked, **kwargs)
if result is None: # pragma: no cover
# This happens for the "at" method.
return result
if out is not None and len(out) == 1:
out = out[0]
return self._masked_result(result, mask, out)
def __array_function__(self, function, types, args, kwargs):
# TODO: go through functions systematically to see which ones
# work and/or can be supported.
if function in MASKED_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in APPLY_TO_BOTH_FUNCTIONS:
helper = APPLY_TO_BOTH_FUNCTIONS[function]
try:
helper_result = helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
data_args, mask_args, kwargs, out = helper_result
if out is not None:
if not isinstance(out, Masked):
return self._not_implemented_or_raise(function, types)
function(*mask_args, out=out.mask, **kwargs)
function(*data_args, out=out.unmasked, **kwargs)
return out
mask = function(*mask_args, **kwargs)
result = function(*data_args, **kwargs)
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
dispatched_result = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
if not isinstance(dispatched_result, tuple):
return dispatched_result
result, mask, out = dispatched_result
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else: # pragma: no cover
# By default, just pass it through for now.
return super().__array_function__(function, types, args, kwargs)
if mask is None:
return result
else:
return self._masked_result(result, mask, out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Masked. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Masked subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Masked)
for t in types):
raise TypeError("the MaskedNDArray implementation cannot handle {} "
"with the given arguments."
.format(function)) from None
else:
return NotImplemented
def _masked_result(self, result, mask, out):
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
if not isinstance(mask, (list, tuple)):
mask = (mask,) * len(result)
return tuple(self._masked_result(result_, mask_, out_)
for (result_, mask_, out_) in zip(result, mask, out))
if out is None:
# Note that we cannot count on result being the same class as
# 'self' (e.g., comparison of quantity results in an ndarray, most
# operations on Longitude and Latitude result in Angle or
# Quantity), so use Masked to determine the appropriate class.
return Masked(result, mask)
# TODO: remove this sanity check once test cases are more complete.
assert isinstance(out, Masked)
# If we have an output, the result was written in-place, so we should
# also write the mask in-place (if not done already in the code).
if out._mask is not mask:
out._mask[...] = mask
return out
# Below are ndarray methods that need to be overridden as masked elements
# need to be skipped and/or an initial value needs to be set.
def _reduce_defaults(self, kwargs, initial_func=None):
"""Get default where and initial for masked reductions.
Generally, the default should be to skip all masked elements. For
reductions such as np.minimum.reduce, we also need an initial value,
which can be determined using ``initial_func``.
"""
if 'where' not in kwargs:
kwargs['where'] = ~self.mask
if initial_func is not None and 'initial' not in kwargs:
kwargs['initial'] = initial_func(self.unmasked)
return kwargs
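    # E.g. (a sketch): for ``min`` below, masked elements are skipped via
    # ``where`` and ``initial`` is the largest unmasked value, so the
    # reduction stays well defined.
    #
    #     >>> ma = Masked(np.array([3., 1., 2.]), mask=[False, True, False])
    #     >>> float(ma.min().unmasked)
    #     2.0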
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
# Unfortunately, cannot override the call to diagonal inside trace, so
# duplicate implementation in numpy/core/src/multiarray/calculation.c.
diagonal = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return diagonal.sum(-1, dtype=dtype, out=out)
def min(self, axis=None, out=None, **kwargs):
return super().min(axis=axis, out=out,
**self._reduce_defaults(kwargs, np.nanmax))
def max(self, axis=None, out=None, **kwargs):
return super().max(axis=axis, out=out,
**self._reduce_defaults(kwargs, np.nanmin))
def nonzero(self):
unmasked_nonzero = self.unmasked.nonzero()
if self.ndim >= 1:
not_masked = ~self.mask[unmasked_nonzero]
return tuple(u[not_masked] for u in unmasked_nonzero)
else:
return unmasked_nonzero if not self.mask else np.nonzero(0)
def compress(self, condition, axis=None, out=None):
if out is not None:
raise NotImplementedError('cannot yet give output')
return self._apply('compress', condition, axis=axis)
def repeat(self, repeats, axis=None):
return self._apply('repeat', repeats, axis=axis)
def choose(self, choices, out=None, mode='raise'):
# Let __array_function__ take care since choices can be masked too.
return np.choose(self, choices, out=out, mode=mode)
def argmin(self, axis=None, out=None):
# Todo: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out)
def argmax(self, axis=None, out=None):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out)
def argsort(self, axis=-1, kind=None, order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis on both the array
and the mask, with masked items being sorted to the end.
Parameters
----------
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis).
If None, the flattened array is used.
kind : str or None, ignored.
The kind of sort. Present only to allow subclasses to work.
order : str or list of str.
For an array with fields defined, the fields to compare first,
second, etc. A single field can be specified as a string, and not
all fields need be specified, but unspecified fields will still be
used, in dtype order, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sorts along the specified ``axis``. Use
``np.take_along_axis(self, index_array, axis=axis)`` to obtain
the sorted array.
"""
if axis is None:
data = self.ravel()
axis = -1
else:
data = self
if self.dtype.names:
# As done inside the argsort implementation in multiarray/methods.c.
if order is None:
order = self.dtype.names
else:
order = np.core._internal._newnames(self.dtype, order)
keys = tuple(data[name] for name in order[::-1])
elif order is not None:
raise ValueError('Cannot specify order when the array has no fields.')
else:
keys = (data,)
return np.lexsort(keys, axis=axis)
def sort(self, axis=-1, kind=None, order=None):
"""Sort an array in-place. Refer to `numpy.sort` for full documentation."""
# TODO: probably possible to do this faster than going through argsort!
indices = self.argsort(axis, kind=kind, order=order)
self[:] = np.take_along_axis(self, indices, axis=axis)
def argpartition(self, kth, axis=-1, kind='introselect', order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.argsort(axis=axis, order=order)
def partition(self, kth, axis=-1, kind='introselect', order=None):
# TODO: should be possible to do this faster than with a full argsort!
        return self.sort(axis=axis, order=order)
def cumsum(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.add.accumulate(self, axis=axis, dtype=dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.multiply.accumulate(self, axis=axis, dtype=dtype, out=out)
def clip(self, min=None, max=None, out=None, **kwargs):
"""Return an array whose values are limited to ``[min, max]``.
Like `~numpy.clip`, but any masked values in ``min`` and ``max``
are ignored for clipping. The mask of the input array is propagated.
"""
# TODO: implement this at the ufunc level.
dmin, mmin = self._get_data_and_mask(min)
dmax, mmax = self._get_data_and_mask(max)
if mmin is None and mmax is None:
# Fast path for unmasked max, min.
return super().clip(min, max, out=out, **kwargs)
masked_out = np.positive(self, out=out)
out = masked_out.unmasked
if dmin is not None:
np.maximum(out, dmin, out=out, where=True if mmin is None else ~mmin)
if dmax is not None:
np.minimum(out, dmax, out=out, where=True if mmax is None else ~mmax)
return masked_out
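    # Sketch of the behaviour: a fully masked lower bound is ignored, while
    # the input mask is propagated.
    #
    #     >>> a = Masked(np.array([1., 5., 9.]), mask=[False, False, True])
    #     >>> a.clip(Masked(2., mask=True), 8.).unmasked
    #     array([1., 5., 8.])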
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Implementation based on that in numpy/core/_methods.py
# Cast bool, unsigned int, and int to float64 by default,
# and do float16 at higher precision.
is_float16_result = False
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype('f8')
elif issubclass(self.dtype.type, np.float16):
dtype = np.dtype('f4')
is_float16_result = out is None
where = ~self.mask & where
result = self.sum(axis=axis, dtype=dtype, out=out,
keepdims=keepdims, where=where)
n = np.add.reduce(where, axis=axis, keepdims=keepdims)
result /= n
if is_float16_result:
result = result.astype(self.dtype)
return result
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):
where_final = ~self.mask & where
# Simplified implementation based on that in numpy/core/_methods.py
n = np.add.reduce(where_final, axis=axis, keepdims=keepdims)[...]
# Cast bool, unsigned int, and int to float64 by default.
if dtype is None and issubclass(self.dtype.type,
(np.integer, np.bool_)):
dtype = np.dtype('f8')
mean = self.mean(axis=axis, dtype=dtype, keepdims=True, where=where)
x = self - mean
x *= x.conjugate() # Conjugate just returns x if not complex.
result = x.sum(axis=axis, dtype=dtype, out=out,
keepdims=keepdims, where=where_final)
n -= ddof
n = np.maximum(n, 0, out=n)
result /= n
result._mask |= (n == 0)
return result
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):
result = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims, where=where)
return np.sqrt(result, out=result)
def __bool__(self):
# First get result from array itself; this will error if not a scalar.
result = super().__bool__()
return result and not self.mask
def any(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_or.reduce(self, axis=axis, out=out,
keepdims=keepdims, where=~self.mask & where)
def all(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_and.reduce(self, axis=axis, out=out,
keepdims=keepdims, where=~self.mask & where)
# Following overrides needed since somehow the ndarray implementation
# does not actually call these.
def __str__(self):
return np.array_str(self)
def __repr__(self):
return np.array_repr(self)
def __format__(self, format_spec):
string = super().__format__(format_spec)
if self.shape == () and self.mask:
n = min(3, max(1, len(string)))
return ' ' * (len(string)-n) + '\u2014' * n
else:
return string
class MaskedRecarray(np.recarray, MaskedNDArray, data_cls=np.recarray):
# Explicit definition since we need to override some methods.
def __array_finalize__(self, obj):
# recarray.__array_finalize__ does not do super, so we do it
# explicitly.
super().__array_finalize__(obj)
super(np.recarray, self).__array_finalize__(obj)
# __getattribute__, __setattr__, and field use these somewhat
    # obscure ndarray methods. TODO: override in MaskedNDArray?
def getfield(self, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
return self[field]
raise NotImplementedError('can only get existing field from '
'structured dtype.')
def setfield(self, val, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
self[field] = val
return
raise NotImplementedError('can only set existing field from '
'structured dtype.')
|
6a02a6c6a2b9e7990568eb29915ee60e7f1ff51ac6bc453e591323e5b38cc3d1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helpers for letting numpy functions interact with Masked arrays.
The module supplies helper routines for numpy functions that propagate
masks appropriately, for use in the ``__array_function__``
implementation of `~astropy.utils.masked.MaskedNDArray`. They are not
very useful on their own, but the ones with docstrings are included in
the documentation so that there is a place to find out how the mask is
interpreted.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import (
FunctionAssigner)
from astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_20, NUMPY_LT_1_23
# This module should not really be imported, but we define __all__
# such that sphinx can typeset the functions with docstrings.
# The latter are added to __all__ at the end.
__all__ = ['MASKED_SAFE_FUNCTIONS', 'APPLY_TO_BOTH_FUNCTIONS',
'DISPATCHED_FUNCTIONS', 'UNSUPPORTED_FUNCTIONS']
MASKED_SAFE_FUNCTIONS = set()
"""Set of functions that work fine on Masked classes already.
Most of these internally use `numpy.ufunc` or other functions that
are already covered.
"""
APPLY_TO_BOTH_FUNCTIONS = {}
"""Dict of functions that should apply to both data and mask.
The `dict` is keyed by the numpy function and the values are functions
that take the input arguments of the numpy function and organize these
for passing the data and mask to the numpy function.
Returns
-------
data_args : tuple
Arguments to pass on to the numpy function for the unmasked data.
mask_args : tuple
Arguments to pass on to the numpy function for the masked data.
kwargs : dict
Keyword arguments to pass on for both unmasked data and mask.
out : `~astropy.utils.masked.Masked` instance or None
Optional instance in which to store the output.
Raises
------
NotImplementedError
    When an argument is masked when it should not be, or vice versa.
"""
DISPATCHED_FUNCTIONS = {}
"""Dict of functions that provide the numpy function's functionality.
These are for more complicated versions where the numpy function itself
cannot easily be used. It should return either the result of the
function, or a tuple consisting of the unmasked result, the mask for the
result and a possible output instance.
It should raise `NotImplementedError` if one of the arguments is masked
when it should not be or vice versa.
"""
UNSUPPORTED_FUNCTIONS = set()
"""Set of numpy functions that are not supported for masked arrays.
For most, masked input simply makes no sense, but for others it may have
been lack of time. Issues or PRs for support for functions are welcome.
"""
# Almost all from np.core.fromnumeric defer to methods so are OK.
MASKED_SAFE_FUNCTIONS |= set(
getattr(np, name) for name in np.core.fromnumeric.__all__
    if name not in {'choose', 'put', 'resize', 'searchsorted', 'where', 'alen'})
MASKED_SAFE_FUNCTIONS |= {
# built-in from multiarray
np.may_share_memory, np.can_cast, np.min_scalar_type, np.result_type,
np.shares_memory,
# np.core.arrayprint
np.array_repr,
# np.core.function_base
np.linspace, np.logspace, np.geomspace,
# np.core.numeric
np.isclose, np.allclose, np.flatnonzero, np.argwhere,
# np.core.shape_base
np.atleast_1d, np.atleast_2d, np.atleast_3d, np.stack, np.hstack, np.vstack,
# np.lib.function_base
np.average, np.diff, np.extract, np.meshgrid, np.trapz, np.gradient,
# np.lib.index_tricks
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.fill_diagonal,
# np.lib.shape_base
np.column_stack, np.row_stack, np.dstack,
np.array_split, np.split, np.hsplit, np.vsplit, np.dsplit,
np.expand_dims, np.apply_along_axis, np.kron, np.tile,
np.take_along_axis, np.put_along_axis,
# np.lib.type_check (all but asfarray, nan_to_num)
np.iscomplexobj, np.isrealobj, np.imag, np.isreal,
np.real, np.real_if_close, np.common_type,
# np.lib.ufunclike
np.fix, np.isneginf, np.isposinf,
# np.lib.function_base
np.angle, np.i0,
}
IGNORED_FUNCTIONS = {
# I/O - useless for Masked, since no way to store the mask.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander}
if NUMPY_LT_1_20:
# financial
IGNORED_FUNCTIONS |= {np.fv, np.ipmt, np.irr, np.mirr, np.nper,
np.npv, np.pmt, np.ppmt, np.pv, np.rate}
# TODO: some of the following could in principle be supported.
IGNORED_FUNCTIONS |= {
np.pad,
np.searchsorted, np.digitize,
np.is_busday, np.busday_count, np.busday_offset,
# numpy.lib.function_base
np.cov, np.corrcoef, np.trim_zeros,
# numpy.core.numeric
np.correlate, np.convolve,
# numpy.lib.histograms
np.histogram, np.histogram2d, np.histogramdd, np.histogram_bin_edges,
# TODO!!
np.dot, np.vdot, np.inner, np.tensordot, np.cross,
np.einsum, np.einsum_path,
}
# Really should do these...
IGNORED_FUNCTIONS |= set(getattr(np, setopsname)
for setopsname in np.lib.arraysetops.__all__)
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
# Explicitly unsupported functions
UNSUPPORTED_FUNCTIONS |= {
np.unravel_index, np.ravel_multi_index, np.ix_,
}
# No support for the functions also not supported by Quantity
# (io, polynomial, etc.).
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
apply_to_both = FunctionAssigner(APPLY_TO_BOTH_FUNCTIONS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
def _get_data_and_masks(*args):
"""Separate out arguments into tuples of data and masks.
An all-False mask is created if an argument does not have a mask.
"""
from .core import Masked
data, masks = Masked._get_data_and_masks(*args)
masks = tuple(m if m is not None else np.zeros(np.shape(d), bool)
for d, m in zip(data, masks))
return data, masks
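# A minimal sketch of the separation above (illustrative values, not from
# the source):
#
#     from astropy.utils.masked import Masked
#     ma = Masked([1., 2.], mask=[False, True])
#     data, masks = _get_data_and_masks(ma, np.array([3., 4.]))
#     # data  -> (array([1., 2.]), array([3., 4.]))
#     # masks -> (array([False,  True]), array([False, False]))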
# Following are simple ufunc-like functions which should just copy the mask.
@dispatched_function
def datetime_as_string(arr, *args, **kwargs):
return (np.datetime_as_string(arr.unmasked, *args, **kwargs),
arr.mask.copy(), None)
@dispatched_function
def sinc(x):
return np.sinc(x.unmasked), x.mask.copy(), None
@dispatched_function
def iscomplex(x):
return np.iscomplex(x.unmasked), x.mask.copy(), None
@dispatched_function
def unwrap(p, *args, **kwargs):
return np.unwrap(p.unmasked, *args, **kwargs), p.mask.copy(), None
@dispatched_function
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
data = np.nan_to_num(x.unmasked, copy=copy,
nan=nan, posinf=posinf, neginf=neginf)
return (data, x.mask.copy(), None) if copy else x
# Following are simple functions related to shapes, where the same function
# should be applied to the data and the mask. They cannot all share the
# same helper, because the first arguments have different names.
@apply_to_both(helps={
np.copy, np.asfarray, np.resize, np.moveaxis, np.rollaxis, np.roll})
def masked_a_helper(a, *args, **kwargs):
data, mask = _get_data_and_masks(a)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.flip, np.flipud, np.fliplr, np.rot90, np.triu, np.tril})
def masked_m_helper(m, *args, **kwargs):
data, mask = _get_data_and_masks(m)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.diag, np.diagflat})
def masked_v_helper(v, *args, **kwargs):
data, mask = _get_data_and_masks(v)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.delete})
def masked_arr_helper(array, *args, **kwargs):
data, mask = _get_data_and_masks(array)
return data + args, mask + args, kwargs, None
@apply_to_both
def broadcast_to(array, shape, subok=False):
"""Broadcast array to the given shape.
Like `numpy.broadcast_to`, and applied to both unmasked data and mask.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and mask are allowed, i.e., for ``subok=False``,
a `~astropy.utils.masked.MaskedNDArray` will be returned.
"""
data, mask = _get_data_and_masks(array)
return data, mask, dict(shape=shape, subok=subok), None
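# Example sketch (illustrative; assumes ``from astropy.utils.masked import
# Masked``):
#
#     ma = Masked(np.arange(3.), mask=[False, True, False])
#     mb = np.broadcast_to(ma, (2, 3))
#     # both mb.unmasked and mb.mask now have shape (2, 3)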
@dispatched_function
def outer(a, b, out=None):
return np.multiply.outer(np.ravel(a), np.ravel(b), out=out)
@dispatched_function
def empty_like(prototype, dtype=None, order='K', subok=True, shape=None):
"""Return a new array with the same shape and type as a given array.
Like `numpy.empty_like`, but will add an empty mask.
"""
unmasked = np.empty_like(prototype.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
if dtype is not None:
dtype = (np.ma.make_mask_descr(unmasked.dtype)
if unmasked.dtype.names else np.dtype('?'))
mask = np.empty_like(prototype.mask, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, mask, None
@dispatched_function
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""Return an array of zeros with the same shape and type as a given array.
Like `numpy.zeros_like`, but will add an all-false mask.
"""
unmasked = np.zeros_like(a.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, False, None
@dispatched_function
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""Return an array of ones with the same shape and type as a given array.
Like `numpy.ones_like`, but will add an all-false mask.
"""
unmasked = np.ones_like(a.unmasked, dtype=dtype, order=order,
subok=subok, shape=shape)
return unmasked, False, None
@dispatched_function
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""Return a full array with the same shape and type as a given array.
Like `numpy.full_like`, but with a mask that is also set.
If ``fill_value`` is `numpy.ma.masked`, the data will be left unset
(i.e., as created by `numpy.empty_like`).
"""
result = np.empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
result[...] = fill_value
return result
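# Example sketch (illustrative; assumes ``Masked`` is imported): filling
# with np.ma.masked masks everything while leaving the data uninitialized,
# as per the docstring above:
#
#     ma = Masked(np.arange(3.))
#     mf = np.full_like(ma, np.ma.masked)
#     # mf.mask is all True; mf.unmasked holds whatever empty_like produced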
@dispatched_function
def put(a, ind, v, mode='raise'):
"""Replaces specified elements of an array with given values.
Like `numpy.put`, but for masked array ``a`` and possibly masked
value ``v``. Masked indices ``ind`` are not supported.
"""
from astropy.utils.masked import Masked
if isinstance(ind, Masked) or not isinstance(a, Masked):
raise NotImplementedError
v_data, v_mask = a._get_data_and_mask(v)
if v_data is not None:
np.put(a.unmasked, ind, v_data, mode=mode)
# v_mask of None will be correctly interpreted as False.
np.put(a.mask, ind, v_mask, mode=mode)
return None
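# Example sketch (illustrative; assumes ``Masked`` is imported):
#
#     a = Masked(np.arange(3.), mask=[False, True, False])
#     np.put(a, [0], Masked([9.], mask=[True]))
#     # a.unmasked[0] is now 9. and a.mask[0] is now True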
@dispatched_function
def putmask(a, mask, values):
"""Changes elements of an array based on conditional and input values.
Like `numpy.putmask`, but for masked array ``a`` and possibly masked
``values``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(a, Masked):
raise NotImplementedError
values_data, values_mask = a._get_data_and_mask(values)
if values_data is not None:
np.putmask(a.unmasked, mask, values_data)
np.putmask(a.mask, mask, values_mask)
return None
@dispatched_function
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
    Like `numpy.place`, but for masked array ``arr`` and possibly masked
    ``vals``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
vals_data, vals_mask = arr._get_data_and_mask(vals)
if vals_data is not None:
np.place(arr.unmasked, mask, vals_data)
np.place(arr.mask, mask, vals_mask)
return None
@dispatched_function
def copyto(dst, src, casting='same_kind', where=True):
"""Copies values from one array to another, broadcasting as necessary.
Like `numpy.copyto`, but for masked destination ``dst`` and possibly
masked source ``src``.
"""
from astropy.utils.masked import Masked
if not isinstance(dst, Masked) or isinstance(where, Masked):
raise NotImplementedError
src_data, src_mask = dst._get_data_and_mask(src)
if src_data is not None:
np.copyto(dst.unmasked, src_data, casting=casting, where=where)
if src_mask is not None:
np.copyto(dst.mask, src_mask, where=where)
return None
@dispatched_function
def packbits(a, *args, **kwargs):
result = np.packbits(a.unmasked, *args, **kwargs)
mask = np.packbits(a.mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def unpackbits(a, *args, **kwargs):
result = np.unpackbits(a.unmasked, *args, **kwargs)
mask = np.zeros(a.shape, dtype='u1')
mask[a.mask] = 255
mask = np.unpackbits(mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def bincount(x, weights=None, minlength=0):
"""Count number of occurrences of each value in array of non-negative ints.
Like `numpy.bincount`, but masked entries in ``x`` will be skipped.
Any masked entries in ``weights`` will lead the corresponding bin to
be masked.
"""
from astropy.utils.masked import Masked
if weights is not None:
weights = np.asanyarray(weights)
if isinstance(x, Masked) and x.ndim <= 1:
# let other dimensions lead to errors.
if weights is not None and weights.ndim == x.ndim:
weights = weights[~x.mask]
x = x.unmasked[~x.mask]
mask = None
if weights is not None:
weights, w_mask = Masked._get_data_and_mask(weights)
if w_mask is not None:
mask = np.bincount(x, w_mask.astype(int),
minlength=minlength).astype(bool)
    result = np.bincount(x, weights, minlength=minlength)
return result, mask, None
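# Example sketch (illustrative; assumes ``Masked`` is imported): the masked
# entry of ``x`` is skipped, so
#
#     x = Masked([0, 1, 1, 2], mask=[False, False, True, False])
#     np.bincount(x)
#
# counts only a single occurrence of 1, giving data [1, 1, 1].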
@dispatched_function
def msort(a):
result = a.copy()
result.sort(axis=0)
return result
@dispatched_function
def sort_complex(a):
# Just a copy of function_base.sort_complex, to avoid the asarray.
b = a.copy()
b.sort()
if not issubclass(b.dtype.type, np.complexfloating): # pragma: no cover
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
@apply_to_both
def concatenate(arrays, axis=0, out=None):
data, masks = _get_data_and_masks(*arrays)
return (data,), (masks,), dict(axis=axis), out
@apply_to_both
def append(arr, values, axis=None):
data, masks = _get_data_and_masks(arr, values)
return data, masks, dict(axis=axis), None
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
from astropy.utils.masked import Masked
(arrays, list_ndim, result_ndim,
final_size) = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = Masked(np.empty(shape=shape, dtype=dtype, order=order))
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
@dispatched_function
def broadcast_arrays(*args, subok=True):
"""Broadcast arrays to a common shape.
Like `numpy.broadcast_arrays`, applied to both unmasked data and masks.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and masks are allowed, i.e., for ``subok=False``,
`~astropy.utils.masked.MaskedNDArray` instances will be returned.
"""
from .core import Masked
are_masked = [isinstance(arg, Masked) for arg in args]
data = [(arg.unmasked if is_masked else arg)
for arg, is_masked in zip(args, are_masked)]
results = np.broadcast_arrays(*data, subok=subok)
shape = results[0].shape if isinstance(results, list) else results.shape
masks = [(np.broadcast_to(arg.mask, shape, subok=subok)
if is_masked else None)
for arg, is_masked in zip(args, are_masked)]
results = [(Masked(result, mask) if mask is not None else result)
for (result, mask) in zip(results, masks)]
return results if len(results) > 1 else results[0]
@apply_to_both
def insert(arr, obj, values, axis=None):
"""Insert values along the given axis before the given indices.
Like `numpy.insert` but for possibly masked ``arr`` and ``values``.
Masked ``obj`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(obj, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
(arr_data, val_data), (arr_mask, val_mask) = _get_data_and_masks(arr, values)
return ((arr_data, obj, val_data, axis),
(arr_mask, obj, val_mask, axis), {}, None)
if NUMPY_LT_1_19:
@dispatched_function
def count_nonzero(a, axis=None):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis)
else:
@dispatched_function
def count_nonzero(a, axis=None, *, keepdims=False):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis, keepdims=keepdims)
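# Example sketch (illustrative; assumes ``Masked`` is imported): masked
# entries count as zero, so
# np.count_nonzero(Masked([1, 0, 2], mask=[False, False, True])) gives 1.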
if NUMPY_LT_1_19:
def _zeros_like(a, dtype=None, order='K', subok=True, shape=None):
if shape != ():
return np.zeros_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
else:
return np.zeros_like(a, dtype=dtype, order=order, subok=subok,
shape=(1,))[0]
else:
_zeros_like = np.zeros_like
def _masked_median_1d(a, overwrite_input):
# TODO: need an in-place mask-sorting option.
unmasked = a.unmasked[~a.mask]
if unmasked.size:
return a.from_unmasked(
np.median(unmasked, overwrite_input=overwrite_input))
else:
return a.from_unmasked(_zeros_like(a.unmasked, shape=(1,))[0], mask=True)
def _masked_median(a, axis=None, out=None, overwrite_input=False):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_median_1d(part, overwrite_input)
else:
result = np.apply_along_axis(_masked_median_1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
@dispatched_function
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
from astropy.utils.masked import Masked
if out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
r, k = np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out,
overwrite_input=overwrite_input)
return (r.reshape(k) if keepdims else r) if out is None else out
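# Example sketch (illustrative; assumes ``Masked`` is imported): masked
# values are ignored, so
# np.median(Masked([1., 2., 100.], mask=[False, False, True]))
# evaluates to a masked scalar equal to 1.5 (with mask False).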
def _masked_quantile_1d(a, q, **kwargs):
"""
    Private function for rank 1 arrays. Compute quantile ignoring masked
    values. See nanpercentile for parameter usage.
"""
unmasked = a.unmasked[~a.mask]
if unmasked.size:
result = np.lib.function_base._quantile_unchecked(unmasked, q, **kwargs)
return a.from_unmasked(result)
else:
return a.from_unmasked(_zeros_like(a.unmasked, shape=q.shape), True)
def _masked_quantile(a, q, axis=None, out=None, **kwargs):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_quantile_1d(part, q, **kwargs)
else:
result = np.apply_along_axis(_masked_quantile_1d, axis, a, q, **kwargs)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
return result
@dispatched_function
def quantile(a, q, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if isinstance(q, Masked) or out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
q = np.asanyarray(q)
if not np.lib.function_base._quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
keepdims = kwargs.pop('keepdims', False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs)
return (r.reshape(k) if keepdims else r) if out is None else out
@dispatched_function
def percentile(a, q, *args, **kwargs):
q = np.true_divide(q, 100)
return quantile(a, q, *args, **kwargs)
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
(a1d, a2d), (a1m, a2m) = _get_data_and_masks(a1, a2)
if a1d.shape != a2d.shape:
return False
equal = (a1d == a2d)
if equal_nan:
equal |= np.isnan(a1d) & np.isnan(a2d)
return bool((equal | a1m | a2m).all())
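# Example sketch (illustrative; assumes ``Masked`` is imported): masked
# elements compare as equal, so
#
#     np.array_equal(Masked([1., 2.], mask=[False, True]), np.array([1., 99.]))
#
# returns True: only the unmasked first element has to match.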
@dispatched_function
def array_equiv(a1, a2):
return bool((a1 == a2).all())
@dispatched_function
def where(condition, *args):
from astropy.utils.masked import Masked
if not args:
return condition.nonzero(), None, None
condition, c_mask = Masked._get_data_and_mask(condition)
data, masks = _get_data_and_masks(*args)
unmasked = np.where(condition, *data)
mask = np.where(condition, *masks)
if c_mask is not None:
mask |= c_mask
return Masked(unmasked, mask=mask)
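# Example sketch (illustrative; assumes ``Masked`` is imported): masks of
# the chosen values and of the condition are combined:
#
#     cond = Masked([True, False], mask=[False, True])
#     np.where(cond, Masked([1, 2], mask=[False, True]), np.array([3, 4]))
#     # -> data [1, 4] with mask [False, True] (choice mask | condition mask)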
@dispatched_function
def choose(a, choices, out=None, mode='raise'):
"""Construct an array from an index array and a set of arrays to choose from.
Like `numpy.choose`. Masked indices in ``a`` will lead to masked output
values and underlying data values are ignored if out of bounds (for
``mode='raise'``). Any values masked in ``choices`` will be propagated
if chosen.
"""
from astropy.utils.masked import Masked
a_data, a_mask = Masked._get_data_and_mask(a)
if a_mask is not None and mode == 'raise':
# Avoid raising on masked indices.
a_data = a.filled(fill_value=0)
kwargs = {'mode': mode}
if out is not None:
if not isinstance(out, Masked):
raise NotImplementedError
kwargs['out'] = out.unmasked
data, masks = _get_data_and_masks(*choices)
data_chosen = np.choose(a_data, data, **kwargs)
if out is not None:
kwargs['out'] = out.mask
mask_chosen = np.choose(a_data, masks, **kwargs)
if a_mask is not None:
mask_chosen |= a_mask
return Masked(data_chosen, mask_chosen) if out is None else out
@apply_to_both
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
    Like `numpy.select`, but masks in ``choicelist`` are propagated.
Any masks in ``condlist`` are ignored.
"""
from astropy.utils.masked import Masked
condlist = [c.unmasked if isinstance(c, Masked) else c
for c in condlist]
data_list, mask_list = _get_data_and_masks(*choicelist)
default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)
return ((condlist, data_list, default.unmasked),
(condlist, mask_list, default.mask), {}, None)
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
"""Evaluate a piecewise-defined function.
Like `numpy.piecewise` but for masked input array ``x``.
Any masks in ``condlist`` are ignored.
"""
# Copied implementation from numpy.lib.function_base.piecewise,
# just to ensure output is Masked.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray))
and x.ndim != 0): # pragma: no cover
condlist = [condlist]
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
# The one real change...
y = np.zeros_like(x)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
for item, value in zip(where, what):
y[item] = value
return y
@dispatched_function
def interp(x, xp, fp, *args, **kwargs):
"""One-dimensional linear interpolation.
Like `numpy.interp`, but any masked points in ``xp`` and ``fp``
are ignored. Any masked values in ``x`` will still be evaluated,
but masked on output.
"""
from astropy.utils.masked import Masked
xd, xm = Masked._get_data_and_mask(x)
if isinstance(xp, Masked) or isinstance(fp, Masked):
(xp, fp), (xpm, fpm) = _get_data_and_masks(xp, fp)
if xp.ndim == fp.ndim == 1:
# Avoid making arrays 1-D; will just raise below.
m = xpm | fpm
xp = xp[~m]
fp = fp[~m]
result = np.interp(xd, xp, fp, *args, **kwargs)
return result if xm is None else Masked(result, xm.copy())
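# Example sketch (illustrative; assumes ``Masked`` is imported): a masked
# point in ``fp`` is dropped before interpolating:
#
#     xp = np.array([0., 1., 2.])
#     fp = Masked([0., 99., 2.], mask=[False, True, False])
#     np.interp(1., xp, fp)   # interpolates (0, 0)-(2, 2), giving 1.0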
@dispatched_function
def lexsort(keys, axis=-1):
"""Perform an indirect stable sort using a sequence of keys.
Like `numpy.lexsort` but for possibly masked ``keys``. Masked
values are sorted towards the end for each key.
"""
# Sort masks to the end.
from .core import Masked
new_keys = []
for key in keys:
if isinstance(key, Masked):
            # If there are other keys below, we want to be sure that
            # for masked values, those other keys set the order.
new_key = key.unmasked
if new_keys and key.mask.any():
new_key = new_key.copy()
new_key[key.mask] = new_key.flat[0]
new_keys.extend([new_key, key.mask])
else:
new_keys.append(key)
return np.lexsort(new_keys, axis=axis)
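# Example sketch (illustrative; assumes ``Masked`` is imported): with a
# single masked key, the masked entry sorts last:
#
#     key = Masked([3., 1., 2.], mask=[False, True, False])
#     np.lexsort((key,))   # -> array([2, 0, 1])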
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is Masked.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
class MaskedFormat:
"""Formatter for masked array scalars.
For use in `numpy.array2string`, wrapping the regular formatters such
that if a value is masked, its formatted string is replaced.
Typically initialized using the ``from_data`` class method.
"""
def __init__(self, format_function):
self.format_function = format_function
# Special case for structured void: we need to make all the
# format functions for the items masked as well.
        # TODO: maybe a separate class is more logical?
ffs = getattr(format_function, 'format_functions', None)
if ffs:
self.format_function.format_functions = [MaskedFormat(ff) for ff in ffs]
def __call__(self, x):
if x.dtype.names:
# The replacement of x with a list is needed because the function
# inside StructuredVoidFormat iterates over x, which works for an
# np.void but not an array scalar.
return self.format_function([x[field] for field in x.dtype.names])
string = self.format_function(x.unmasked[()])
if x.mask:
# Strikethrough would be neat, but terminal needs a different
# formatting than, say, jupyter notebook.
# return "\x1B[9m"+string+"\x1B[29m"
# return ''.join(s+'\u0336' for s in string)
n = min(3, max(1, len(string)))
return ' ' * (len(string)-n) + '\u2014' * n
else:
return string
@classmethod
def from_data(cls, data, **options):
from numpy.core.arrayprint import _get_format_function
return cls(_get_format_function(data, **options))
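# Example sketch (illustrative; assumes ``Masked`` is imported): with this
# formatter, a masked element is rendered as a short run of em dashes sized
# to the field, so something like repr(Masked([1., 2.], mask=[False, True]))
# shows the second element as dashes rather than its underlying value.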
def _array2string(a, options, separator=' ', prefix=""):
# Mostly copied from numpy.core.arrayprint, except:
# - The format function is wrapped in a mask-aware class;
    # - Array scalars are not cast as arrays.
from numpy.core.arrayprint import _leading_trailing, _formatArray
data = np.asarray(a)
if a.size > options['threshold']:
summary_insert = "..."
data = _leading_trailing(data, options['edgeitems'])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = MaskedFormat.from_data(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " "*len(prefix)
lst = _formatArray(a, format_function, options['linewidth'],
next_line_prefix, separator, options['edgeitems'],
summary_insert, options['legacy'])
return lst
@dispatched_function
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=""):
# Copied from numpy.core.arrayprint, but using _array2string above.
from numpy.core.arrayprint import _make_options_dict, _format_options
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
sign, formatter, floatmode)
options = _format_options.copy()
options.update(overrides)
options['linewidth'] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
@dispatched_function
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
# Override to avoid special treatment of array scalars.
return array2string(a, max_line_width, precision, suppress_small, ' ', "")
# For the nanfunctions, we just treat any nan as an additional mask.
_nanfunc_fill_values = {'nansum': 0, 'nancumsum': 0,
'nanprod': 1, 'nancumprod': 1}
def masked_nanfunc(nanfuncname):
np_func = getattr(np, nanfuncname[3:])
fill_value = _nanfunc_fill_values.get(nanfuncname, None)
def nanfunc(a, *args, **kwargs):
from astropy.utils.masked import Masked
a, mask = Masked._get_data_and_mask(a)
if issubclass(a.dtype.type, np.inexact):
nans = np.isnan(a)
mask = nans if mask is None else (nans | mask)
if mask is not None:
a = Masked(a, mask)
if fill_value is not None:
a = a.filled(fill_value)
return np_func(a, *args, **kwargs)
doc = f"Like `numpy.{nanfuncname}`, skipping masked values as well.\n\n"
if fill_value is not None:
# sum, cumsum, prod, cumprod
doc += (f"Masked/NaN values are replaced with {fill_value}. "
"The output is not masked.")
elif "arg" in nanfuncname:
doc += ("No exceptions are raised for fully masked/NaN slices.\n"
"Instead, these give index 0.")
else:
doc += ("No warnings are given for fully masked/NaN slices.\n"
"Instead, they are masked in the output.")
nanfunc.__doc__ = doc
nanfunc.__name__ = nanfuncname
return nanfunc
for nanfuncname in np.lib.nanfunctions.__all__:
globals()[nanfuncname] = dispatched_function(masked_nanfunc(nanfuncname),
helps=getattr(np, nanfuncname))
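# Example sketch (illustrative; assumes ``Masked`` is imported): with the
# wrappers above,
#
#     np.nansum(Masked([1., np.nan, 3.], mask=[False, False, True]))
#
# gives 1.0, since both the NaN and the masked entry are filled with 0.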
# Add any dispatched or helper function that has a docstring to
# __all__, so they will be typeset by sphinx. The rationale is that for
# those, the handling of the mask is presumably not entirely obvious.
__all__ += sorted(helper.__name__ for helper in (
set(APPLY_TO_BOTH_FUNCTIONS.values())
| set(DISPATCHED_FUNCTIONS.values())) if helper.__doc__)
|
5fb9a584a384755bc8f6e100371204894c91cae01fee2837cc1a21bf41c893c6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
import warnings
# First, the top-level packages:
# TODO: This list is a duplicate of the dependencies in setup.cfg "all", but
# some of the package names are different from the pip-install name (e.g.,
# beautifulsoup4 -> bs4).
_optional_deps = ['asdf', 'asdf_astropy', 'bleach', 'bottleneck', 'bs4', 'bz2', 'h5py',
'html5lib', 'IPython', 'jplephem', 'lxml', 'matplotlib',
'mpmath', 'pandas', 'PIL', 'pytz', 'scipy', 'skyfield',
'sortedcontainers', 'lzma', 'pyarrow']
_formerly_optional_deps = ['yaml'] # for backward compatibility
_deps = {k.upper(): k for k in _optional_deps + _formerly_optional_deps}
# Any subpackages that have different import behavior:
_deps['PLT'] = 'matplotlib.pyplot'
__all__ = [f"HAS_{pkg}" for pkg in _deps]
def __getattr__(name):
if name in __all__:
module_name = name[4:]
if module_name == "YAML":
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"PyYaml is now a strict dependency. HAS_YAML is deprecated as "
"of v5.0 and will be removed in a subsequent version.",
category=AstropyDeprecationWarning)
try:
importlib.import_module(_deps[module_name])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}.")
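# Example sketch (illustrative): elsewhere in astropy, one simply does
#
#     from astropy.utils.compat.optional_deps import HAS_SCIPY
#
#     if HAS_SCIPY:
#         from scipy import optimize
#
# which triggers the lazy check in ``__getattr__`` above.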
|
bc96d165397cb6ab3d8c7af5fa7412f911c277e41fab17406cb3fd37e2d97750 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
from pathlib import Path
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.config import set_temp_cache
from astropy.utils.iers import iers
from astropy import units as u
from astropy.table import QTable
from astropy.time import Time, TimeDelta
CI = os.environ.get('CI', False)
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)
try:
iers.IERS_A.open('finals2000A.all') # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = get_pkg_data_filename(os.path.join('data', 'iers_a_excerpt'))
def setup_module():
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail. Files to be downloaded are handled appropriately in the tests.
iers.conf.auto_download = True
def teardown_module():
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
@pytest.mark.parametrize('iers_cls', (iers.IERS_B, iers.IERS))
def test_simple(self, iers_cls):
"""Test the default behaviour for IERS_B and IERS."""
# Arguably, IERS itself should not be used at all, but it used to
# provide IERS_B by default so we check that it continues to do so.
# Eventually, IERS should probably be deprecated.
iers_cls.close()
assert iers_cls.iers_table is None
iers_tab = iers_cls.open()
assert iers_cls.iers_table is not None
assert iers_cls.iers_table is iers_tab
assert isinstance(iers_tab, QTable)
assert isinstance(iers_tab, iers.IERS_B)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS_B.close()
iers.IERS_B.open(iers.IERS_B_FILE)
assert iers.IERS_B.iers_table is not None
assert isinstance(iers.IERS_B.iers_table, QTable)
iers.IERS_B.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS_B.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open(Path(IERS_A_EXCERPT).as_uri())
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
        # Test the IERS A reader. It is also a regression test that ensures
        # values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert (iers_tab['dX_2000A'].unit / u.marcsec).is_unity()
assert (iers_tab['dY_2000A'].unit / u.marcsec).is_unity()
assert 'P' in iers_tab['NutFlag']
assert 'I' in iers_tab['NutFlag']
assert 'B' in iers_tab['NutFlag']
assert np.all((iers_tab['NutFlag'] == 'P') |
(iers_tab['NutFlag'] == 'I') |
(iers_tab['NutFlag'] == 'B'))
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=0.1*u.ms)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
print(dcip_x)
print(dcip_y)
assert_quantity_allclose(dcip_x,
[-0.086, -0.093, -0.087] * u.marcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(dcip_y,
[0.094, 0.081, 0.072] * u.marcsec,
atol=1*u.narcsec)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=0.1*u.marcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=0.1*u.marcsec)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif('not HAS_IERS_A')
class TestIERS_A():
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
class TestIERS_Auto():
def setup_class(self):
"""Set up useful data for the tests.
"""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-02-30-test'))
self.iers_a_file_2 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-04-30-test'))
self.iers_a_url_1 = Path(self.iers_a_file_1).as_uri()
self.iers_a_url_2 = Path(self.iers_a_file_2).as_uri()
self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test.
"""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('iers_auto_url_mirror', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', self.ame):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter('ignore', iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', 5.0):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
_ = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'
def test_no_auto_download(self):
with iers.conf.set_temp('auto_download', False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta['predictive_mjd']
dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d
# Look at times before and after the test file begins. 0.1292905 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto predictive '
'values are older') as warns, \
pytest.raises(ValueError, match='interpolating from IERS_Auto '
'using predictive values'):
dat.ut1_utc(Time(60000, format='mjd').jd)
assert len(warns) == 1
# Warning only if we are getting return status
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto '
'predictive values are older') as warns:
dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)
assert len(warns) == 1
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp('auto_max_age', None):
dat.ut1_utc(Time(60000, format='mjd').jd)
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)
# Now the time range should be different.
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == (57539.0 + 60) * u.d
@pytest.mark.remote_data
def test_IERS_B_parameters_loading_into_IERS_Auto():
A = iers.IERS_Auto.open()
B = iers.IERS_B.open()
ok_A = A["MJD"] <= B["MJD"][-1]
assert not np.all(ok_A), "IERS B covers all of IERS A: should not happen"
# We only overwrite IERS_B values in the IERS_A table that were already
# there in the first place. Better take that into account.
ok_A &= np.isfinite(A["UT1_UTC_B"])
i_B = np.searchsorted(B["MJD"], A["MJD"][ok_A])
assert np.all(np.diff(i_B) == 1), "Valid region not contiguous"
assert np.all(A["MJD"][ok_A] == B["MJD"][i_B])
# Check that values are copied correctly. Since units are not
# necessarily the same, we use allclose with very strict tolerance.
for name in ("UT1_UTC", "PM_x", "PM_y", "dX_2000A", "dY_2000A"):
assert_quantity_allclose(
A[name][ok_A], B[name][i_B], rtol=1e-15,
err_msg=("Bug #9206 IERS B parameter {} not copied over "
"correctly to IERS Auto".format(name)))
# Issue with FTP; rework this test into the previous one when it is fixed.
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_iers_a_dl():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_a_dl_mirror():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL_MIRROR, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_b_dl():
iersb_tab = iers.IERS_B.open(iers.IERS_B_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersb_tab) > 0
assert 'UT1_UTC' in iersb_tab.colnames
finally:
iers.IERS_B.close()
@pytest.mark.remote_data
def test_iers_out_of_range_handling(tmpdir):
# Make sure we don't have IERS-A data available anywhere
with set_temp_cache(tmpdir):
iers.IERS_A.close()
iers.IERS_Auto.close()
iers.IERS.close()
now = Time.now()
with iers.conf.set_temp('auto_download', False):
# Should be fine with built-in IERS_B
(now - 300 * u.day).ut1
# Default is to raise an error
match = r'\(some\) times are outside of range covered by IERS table'
with pytest.raises(iers.IERSRangeError, match=match):
(now + 100 * u.day).ut1
with iers.conf.set_temp('iers_degraded_accuracy', 'warn'):
with pytest.warns(iers.IERSDegradedAccuracyWarning, match=match):
(now + 100 * u.day).ut1
with iers.conf.set_temp('iers_degraded_accuracy', 'ignore'):
(now + 100 * u.day).ut1
@pytest.mark.remote_data
def test_iers_download_error_handling(tmpdir):
# Make sure we don't have IERS-A data available anywhere
with set_temp_cache(tmpdir):
iers.IERS_A.close()
iers.IERS_Auto.close()
iers.IERS.close()
now = Time.now()
# bad site name
with iers.conf.set_temp('iers_auto_url', 'FAIL FAIL'):
# site that exists but doesn't have IERS data
with iers.conf.set_temp('iers_auto_url_mirror', 'https://google.com'):
with pytest.warns(iers.IERSWarning) as record:
with iers.conf.set_temp('iers_degraded_accuracy', 'ignore'):
(now + 100 * u.day).ut1
assert len(record) == 3
assert str(record[0].message).startswith(
'failed to download FAIL FAIL: Malformed URL')
assert str(record[1].message).startswith(
'malformed IERS table from https://google.com')
assert str(record[2].message).startswith(
'unable to download valid IERS file, using local IERS-B')
|
5c282b5f773da980a6d876f9886aea055c2a99bf08cf61885dafcf1f7380ea79 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test masked class initialization, methods, and operators.
Functions, including ufuncs, are tested in test_functions.py
"""
import operator
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import Longitude
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.compat import NUMPY_LT_1_20
def assert_masked_equal(a, b):
assert_array_equal(a.unmasked, b.unmasked)
assert_array_equal(a.mask, b.mask)
VARIOUS_ITEMS = [
(1, 1),
slice(None, 1),
(),
1]
class ArraySetup:
_data_cls = np.ndarray
@classmethod
def setup_class(self):
self.a = np.arange(6.).reshape(2, 3)
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.b = np.array([-3., -2., -1.])
self.mask_b = np.array([False, True, False])
self.c = np.array([[0.25], [0.5]])
self.mask_c = np.array([[False], [True]])
self.sdt = np.dtype([('a', 'f8'), ('b', 'f8')])
self.mask_sdt = np.dtype([('a', '?'), ('b', '?')])
self.sa = np.array([[(1., 2.), (3., 4.)],
[(11., 12.), (13., 14.)]], dtype=self.sdt)
self.mask_sa = np.array([[(True, True), (False, False)],
[(False, True), (True, False)]],
dtype=self.mask_sdt)
self.sb = np.array([(1., 2.), (-3., 4.)], dtype=self.sdt)
self.mask_sb = np.array([(True, False), (False, False)],
dtype=self.mask_sdt)
class QuantitySetup(ArraySetup):
_data_cls = Quantity
@classmethod
def setup_class(self):
super().setup_class()
self.a = Quantity(self.a, u.m)
self.b = Quantity(self.b, u.cm)
self.c = Quantity(self.c, u.km)
self.sa = Quantity(self.sa, u.m, dtype=self.sdt)
self.sb = Quantity(self.sb, u.cm, dtype=self.sdt)
class LongitudeSetup(ArraySetup):
_data_cls = Longitude
@classmethod
def setup_class(self):
super().setup_class()
self.a = Longitude(self.a, u.deg)
self.b = Longitude(self.b, u.deg)
self.c = Longitude(self.c, u.deg)
        # Note: Longitude does not work on structured arrays, so we
        # leave those as regular arrays (which just reruns some tests).
class TestMaskedArrayInitialization(ArraySetup):
def test_simple(self):
ma = Masked(self.a, mask=self.mask_a)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.a))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.a)
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_structured(self):
ma = Masked(self.sa, mask=self.mask_sa)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.sa))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.sa)
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
def test_masked_ndarray_init():
# Note: as a straight ndarray subclass, MaskedNDArray passes on
# the arguments relevant for np.ndarray, not np.array.
a_in = np.arange(3, dtype=int)
m_in = np.array([True, False, False])
buff = a_in.tobytes()
# Check we're doing things correctly using regular ndarray.
a = np.ndarray(shape=(3,), dtype=int, buffer=buff)
assert_array_equal(a, a_in)
# Check with and without mask.
ma = MaskedNDArray((3,), dtype=int, mask=m_in, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, m_in)
ma = MaskedNDArray((3,), dtype=int, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, np.zeros(3, bool))
def test_cannot_initialize_with_masked():
with pytest.raises(ValueError, match='cannot handle np.ma.masked'):
Masked(np.ma.masked)
def test_cannot_just_use_anything_with_a_mask_attribute():
class my_array(np.ndarray):
mask = True
a = np.array([1., 2.]).view(my_array)
with pytest.raises(AttributeError, match='unmasked'):
Masked(a)
class TestMaskedClassCreation:
"""Try creating a MaskedList and subclasses.
By no means meant to be realistic, just to check that the basic
machinery allows it.
"""
@classmethod
def setup_class(self):
self._base_classes_orig = Masked._base_classes.copy()
self._masked_classes_orig = Masked._masked_classes.copy()
class MaskedList(Masked, list, base_cls=list, data_cls=list):
def __new__(cls, *args, mask=None, copy=False, **kwargs):
self = super().__new__(cls)
self._unmasked = self._data_cls(*args, **kwargs)
self.mask = mask
return self
# Need to have shape for basics to work.
@property
def shape(self):
return (len(self._unmasked),)
self.MaskedList = MaskedList
def teardown_class(self):
Masked._base_classes = self._base_classes_orig
Masked._masked_classes = self._masked_classes_orig
def test_setup(self):
assert issubclass(self.MaskedList, Masked)
assert issubclass(self.MaskedList, list)
assert Masked(list) is self.MaskedList
def test_masked_list(self):
ml = self.MaskedList(range(3), mask=[True, False, False])
assert ml.unmasked == [0, 1, 2]
assert_array_equal(ml.mask, np.array([True, False, False]))
ml01 = ml[:2]
assert ml01.unmasked == [0, 1]
assert_array_equal(ml01.mask, np.array([True, False]))
def test_from_list(self):
ml = Masked([1, 2, 3], mask=[True, False, False])
assert ml.unmasked == [1, 2, 3]
assert_array_equal(ml.mask, np.array([True, False, False]))
def test_masked_list_subclass(self):
class MyList(list):
pass
ml = MyList(range(3))
mml = Masked(ml, mask=[False, True, False])
assert isinstance(mml, Masked)
assert isinstance(mml, MyList)
assert isinstance(mml.unmasked, MyList)
assert mml.unmasked == [0, 1, 2]
assert_array_equal(mml.mask, np.array([False, True, False]))
assert Masked(MyList) is type(mml)
class TestMaskedNDArraySubclassCreation:
"""Test that masked subclasses can be created directly and indirectly."""
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.asanyarray(*args, **kwargs).view(cls)
self.MyArray = MyArray
self.a = np.array([1., 2.]).view(self.MyArray)
self.m = np.array([True, False], dtype=bool)
def teardown_method(self, method):
Masked._masked_classes.pop(self.MyArray, None)
def test_direct_creation(self):
assert self.MyArray not in Masked._masked_classes
mcls = Masked(self.MyArray)
assert issubclass(mcls, Masked)
assert issubclass(mcls, self.MyArray)
assert mcls.__name__ == 'MaskedMyArray'
assert mcls.__doc__.startswith('Masked version of MyArray')
mms = mcls(self.a, mask=self.m)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
mcls = Masked(self.MyArray)
mms = mcls(self.a)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, np.zeros(mms.shape, bool))
@pytest.mark.parametrize('masked_array', [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
mcls = Masked(self.MyArray)
ma = masked_array(np.asarray(self.a), mask=self.m)
mms = mcls(ma)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_indirect_creation(self):
assert self.MyArray not in Masked._masked_classes
mms = Masked(self.a, mask=self.m)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
assert self.MyArray in Masked._masked_classes
assert Masked(self.MyArray) is type(mms)
def test_can_initialize_with_masked_values(self):
mcls = Masked(self.MyArray)
mms = mcls(Masked(np.asarray(self.a), mask=self.m))
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_viewing(self):
mms = Masked(self.a, mask=self.m)
mms2 = mms.view()
assert type(mms2) is mms.__class__
assert_masked_equal(mms2, mms)
ma = mms.view(np.ndarray)
assert type(ma) is MaskedNDArray
assert_array_equal(ma.unmasked, self.a.view(np.ndarray))
assert_array_equal(ma.mask, self.m)
class TestMaskedQuantityInitialization(TestMaskedArrayInitialization, QuantitySetup):
def test_masked_quantity_class_init(self):
# TODO: class definitions should be more easily accessible.
mcls = Masked._masked_classes[self.a.__class__]
# This is not a very careful test.
mq = mcls([1., 2.], mask=[True, False], unit=u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1., 2.])
assert np.all(mq.value.mask == [True, False])
assert np.all(mq.mask == [True, False])
def test_masked_quantity_getting(self):
mcls = Masked._masked_classes[self.a.__class__]
MQ = Masked(Quantity)
assert MQ is mcls
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
MQ = Masked(Quantity)
mq = MQ([1., 2.], u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1., 2.])
assert np.all(mq.mask == [False, False])
@pytest.mark.parametrize('masked_array', [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
MQ = Masked(Quantity)
a = np.array([1., 2.])
m = np.array([True, False])
ma = masked_array(a, m)
mq = MQ(ma)
assert isinstance(mq, Masked)
assert isinstance(mq, Quantity)
assert_array_equal(mq.value.unmasked, a)
assert_array_equal(mq.mask, m)
class TestMaskSetting(ArraySetup):
def test_whole_mask_setting_simple(self):
ma = Masked(self.a)
assert ma.mask.shape == ma.shape
assert not ma.mask.any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask.all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array([[True] * 3, [False] * 3]))
ma.mask = self.mask_a
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_whole_mask_setting_structured(self):
ma = Masked(self.sa)
assert ma.mask.shape == ma.shape
assert not ma.mask['a'].any() and not ma.mask['b'].any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask['a'].all() and ma.mask['b'].all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array(
[[(True, True)] * 2, [(False, False)] * 2], dtype=self.mask_sdt))
ma.mask = self.mask_sa
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_part_mask_setting(self, item):
ma = Masked(self.a)
ma.mask[item] = True
expected = np.zeros(ma.shape, bool)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, bool))
# Mask propagation
mask = np.zeros(self.a.shape, bool)
ma = Masked(self.a, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
def test_part_mask_setting_structured(self, item):
ma = Masked(self.sa)
ma.mask[item] = True
expected = np.zeros(ma.shape, self.mask_sdt)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, self.mask_sdt))
# Mask propagation
mask = np.zeros(self.sa.shape, self.mask_sdt)
ma = Masked(self.sa, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
# Following are tests where we trust the initializer works.
class MaskedArraySetup(ArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
self.mc = Masked(self.c, mask=self.mask_c)
self.msa = Masked(self.sa, mask=self.mask_sa)
self.msb = Masked(self.sb, mask=self.mask_sb)
class TestViewing(MaskedArraySetup):
def test_viewing_as_new_type(self):
ma2 = self.ma.view(type(self.ma))
assert_masked_equal(ma2, self.ma)
ma3 = self.ma.view()
assert_masked_equal(ma3, self.ma)
def test_viewing_as_new_dtype(self):
# Not very meaningful, but possible...
ma2 = self.ma.view('c8')
assert_array_equal(ma2.unmasked, self.a.view('c8'))
assert_array_equal(ma2.mask, self.mask_a)
@pytest.mark.parametrize('new_dtype', ['2f4', 'f8,f8,f8'])
def test_viewing_as_new_dtype_not_implemented(self, new_dtype):
# But cannot (yet) view in way that would need to create a new mask,
# even though that view is possible for a regular array.
check = self.a.view(new_dtype)
with pytest.raises(NotImplementedError, match='different.*size'):
self.ma.view(check.dtype)
def test_viewing_as_something_impossible(self):
with pytest.raises(TypeError):
            # Use intp to ensure we have the same size as object;
            # otherwise we get a different error message.
Masked(np.array([1, 2], dtype=np.intp)).view(Masked)
class TestMaskedArrayCopyFilled(MaskedArraySetup):
def test_copy(self):
ma_copy = self.ma.copy()
assert type(ma_copy) is type(self.ma)
assert_array_equal(ma_copy.unmasked, self.ma.unmasked)
assert_array_equal(ma_copy.mask, self.ma.mask)
assert not np.may_share_memory(ma_copy.unmasked, self.ma.unmasked)
assert not np.may_share_memory(ma_copy.mask, self.ma.mask)
@pytest.mark.parametrize('fill_value', (0, 1))
def test_filled(self, fill_value):
fill_value = fill_value * getattr(self.a, 'unit', 1)
expected = self.a.copy()
expected[self.ma.mask] = fill_value
result = self.ma.filled(fill_value)
assert_array_equal(expected, result)
def test_filled_no_fill_value(self):
with pytest.raises(TypeError, match='missing 1 required'):
self.ma.filled()
@pytest.mark.parametrize('fill_value', [(0, 1), (-1, -1)])
def test_filled_structured(self, fill_value):
fill_value = np.array(fill_value, dtype=self.sdt)
if hasattr(self.sa, 'unit'):
fill_value = fill_value << self.sa.unit
expected = self.sa.copy()
expected['a'][self.msa.mask['a']] = fill_value['a']
expected['b'][self.msa.mask['b']] = fill_value['b']
result = self.msa.filled(fill_value)
assert_array_equal(expected, result)
def test_flat(self):
ma_copy = self.ma.copy()
ma_flat = ma_copy.flat
        # Check that a single item keeps class and mask
ma_flat1 = ma_flat[1]
assert ma_flat1.unmasked == self.a.flat[1]
assert ma_flat1.mask == self.mask_a.flat[1]
# As well as getting items via iteration.
assert all((ma.unmasked == a and ma.mask == m) for (ma, a, m)
in zip(self.ma.flat, self.a.flat, self.mask_a.flat))
        # Check that flat works like a view of the real array.
ma_flat[1] = self.b[1]
assert ma_flat[1] == self.b[1]
assert ma_copy[0, 1] == self.b[1]
class TestMaskedQuantityCopyFilled(TestMaskedArrayCopyFilled, QuantitySetup):
pass
class TestMaskedLongitudeCopyFilled(TestMaskedArrayCopyFilled, LongitudeSetup):
pass
class TestMaskedArrayShaping(MaskedArraySetup):
def test_reshape(self):
ma_reshape = self.ma.reshape((6,))
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting(self):
ma_reshape = self.ma.copy()
ma_reshape.shape = 6,
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting_failure(self):
ma = self.ma.copy()
with pytest.raises(ValueError, match='cannot reshape'):
ma.shape = 5,
assert ma.shape == self.ma.shape
assert ma.mask.shape == self.ma.shape
# Here, mask can be reshaped but array cannot.
ma2 = Masked(np.broadcast_to([[1.], [2.]], self.a.shape),
mask=self.mask_a)
with pytest.raises(AttributeError, match='ncompatible shape'):
ma2.shape = 6,
assert ma2.shape == self.ma.shape
assert ma2.mask.shape == self.ma.shape
# Here, array can be reshaped but mask cannot.
ma3 = Masked(self.a.copy(), mask=np.broadcast_to([[True], [False]],
self.mask_a.shape))
with pytest.raises(AttributeError, match='ncompatible shape'):
ma3.shape = 6,
assert ma3.shape == self.ma.shape
assert ma3.mask.shape == self.ma.shape
def test_ravel(self):
ma_ravel = self.ma.ravel()
expected_data = self.a.ravel()
expected_mask = self.mask_a.ravel()
assert ma_ravel.shape == expected_data.shape
assert_array_equal(ma_ravel.unmasked, expected_data)
assert_array_equal(ma_ravel.mask, expected_mask)
def test_transpose(self):
ma_transpose = self.ma.transpose()
expected_data = self.a.transpose()
expected_mask = self.mask_a.transpose()
assert ma_transpose.shape == expected_data.shape
assert_array_equal(ma_transpose.unmasked, expected_data)
assert_array_equal(ma_transpose.mask, expected_mask)
def test_iter(self):
for ma, d, m in zip(self.ma, self.a, self.mask_a):
assert_array_equal(ma.unmasked, d)
assert_array_equal(ma.mask, m)
class MaskedItemTests(MaskedArraySetup):
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_getitem(self, item):
ma_part = self.ma[item]
expected_data = self.a[item]
expected_mask = self.mask_a[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
def test_getitem_structured(self, item):
ma_part = self.msa[item]
expected_data = self.sa[item]
expected_mask = self.mask_sa[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize('indices,axis', [
([0, 1], 1), ([0, 1], 0), ([0, 1], None), ([[0, 1], [2, 3]], None)])
def test_take(self, indices, axis):
ma_take = self.ma.take(indices, axis=axis)
expected_data = self.a.take(indices, axis=axis)
expected_mask = self.mask_a.take(indices, axis=axis)
assert_array_equal(ma_take.unmasked, expected_data)
assert_array_equal(ma_take.mask, expected_mask)
ma_take2 = np.take(self.ma, indices, axis=axis)
assert_masked_equal(ma_take2, ma_take)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
@pytest.mark.parametrize('mask', [None, True, False])
def test_setitem(self, item, mask):
base = self.ma.copy()
expected_data = self.a.copy()
expected_mask = self.mask_a.copy()
value = self.a[0, 0] if mask is None else Masked(self.a[0, 0], mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)
@pytest.mark.parametrize('mask', [None, True, False])
def test_setitem_structured(self, item, mask):
base = self.msa.copy()
expected_data = self.sa.copy()
expected_mask = self.mask_sa.copy()
value = self.sa['b'] if item == 'a' else self.sa[0, 0]
if mask is not None:
value = Masked(value, mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize('item', VARIOUS_ITEMS)
def test_setitem_np_ma_masked(self, item):
base = self.ma.copy()
expected_mask = self.mask_a.copy()
base[item] = np.ma.masked
expected_mask[item] = True
assert_array_equal(base.unmasked, self.a)
assert_array_equal(base.mask, expected_mask)
class TestMaskedArrayItems(MaskedItemTests):
@classmethod
def setup_class(self):
super().setup_class()
self.d = np.array(['aa', 'bb'])
self.mask_d = np.array([True, False])
self.md = Masked(self.d, self.mask_d)
# Quantity, Longitude cannot hold strings.
def test_getitem_strings(self):
md = self.md.copy()
md0 = md[0]
assert md0.unmasked == self.d[0]
assert md0.mask
md_all = md[:]
assert_masked_equal(md_all, md)
def test_setitem_strings_np_ma_masked(self):
md = self.md.copy()
md[1] = np.ma.masked
assert_array_equal(md.unmasked, self.d)
assert_array_equal(md.mask, np.ones(2, bool))
class TestMaskedQuantityItems(MaskedItemTests, QuantitySetup):
pass
class TestMaskedLongitudeItems(MaskedItemTests, LongitudeSetup):
pass
class MaskedOperatorTests(MaskedArraySetup):
@pytest.mark.parametrize('op', (operator.add, operator.sub))
def test_add_subtract(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_equality(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_not_implemented(self):
with pytest.raises(TypeError):
self.ma > 'abc'
@pytest.mark.parametrize('different_names', [False, True])
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_structured_equality(self, op, different_names):
msb = self.msb
if different_names:
            # dtype.fields maps name -> (dtype, offset), so take dt[0].
            msb = msb.astype([(f'different_{name}', dt[0])
                              for name, dt in msb.dtype.fields.items()])
        mapmb = op(self.msa, msb)
# Expected is a bit tricky here: only unmasked fields count
expected_data = np.ones(mapmb.shape, bool)
expected_mask = np.ones(mapmb.shape, bool)
for field in self.sdt.names:
fa, mfa = self.sa[field], self.mask_sa[field]
fb, mfb = self.sb[field], self.mask_sb[field]
mfequal = mfa | mfb
fequal = (fa == fb) | mfequal
expected_data &= fequal
expected_mask &= mfequal
if op is operator.ne:
expected_data = ~expected_data
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_matmul(self):
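        # For @, an output element combines a full row with a full column,
        # so it should be masked as soon as any element along the
        # contracted axis is masked; hence the logical_or.outer of the
        # per-axis "any" masks below.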
result = self.ma.T @ self.ma
assert_array_equal(result.unmasked, self.a.T @ self.a)
mask1 = np.any(self.mask_a, axis=0)
expected_mask = np.logical_or.outer(mask1, mask1)
assert_array_equal(result.mask, expected_mask)
result2 = self.ma.T @ self.a
assert_array_equal(result2.unmasked, self.a.T @ self.a)
expected_mask2 = np.logical_or.outer(mask1, np.zeros(3, bool))
assert_array_equal(result2.mask, expected_mask2)
result3 = self.a.T @ self.ma
assert_array_equal(result3.unmasked, self.a.T @ self.a)
expected_mask3 = np.logical_or.outer(np.zeros(3, bool), mask1)
assert_array_equal(result3.mask, expected_mask3)
def test_matvec(self):
result = self.ma @ self.mb
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.a @ self.b)
        # Using just the masked vector still leaves all elements masked.
result2 = self.a @ self.mb
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.a @ self.b)
new_ma = self.ma.copy()
new_ma.mask[0, 0] = False
result3 = new_ma @ self.b
assert_array_equal(result3.unmasked, self.a @ self.b)
assert_array_equal(result3.mask, new_ma.mask.any(-1))
def test_vecmat(self):
result = self.mb @ self.ma.T
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.b @ self.a.T)
result2 = self.b @ self.ma.T
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.b @ self.a.T)
new_ma = self.ma.T.copy()
new_ma.mask[0, 0] = False
result3 = self.b @ new_ma
assert_array_equal(result3.unmasked, self.b @ self.a.T)
assert_array_equal(result3.mask, new_ma.mask.any(0))
def test_vecvec(self):
result = self.mb @ self.mb
assert result.shape == ()
assert result.mask
assert result.unmasked == self.b @ self.b
mb_no_mask = Masked(self.b, False)
result2 = mb_no_mask @ mb_no_mask
assert not result2.mask
class TestMaskedArrayOperators(MaskedOperatorTests):
# Some further tests that use strings, which are not useful for Quantity.
@pytest.mark.parametrize('op', (operator.eq, operator.ne))
def test_equality_strings(self, op):
m1 = Masked(np.array(['a', 'b', 'c']), mask=[True, False, False])
m2 = Masked(np.array(['a', 'b', 'd']), mask=[False, False, False])
result = op(m1, m2)
assert_array_equal(result.unmasked, op(m1.unmasked, m2.unmasked))
assert_array_equal(result.mask, m1.mask | m2.mask)
result2 = op(m1, m2.unmasked)
assert_masked_equal(result2, result)
def test_not_implemented(self):
with pytest.raises(TypeError):
Masked(['a', 'b']) > object()
class TestMaskedQuantityOperators(MaskedOperatorTests, QuantitySetup):
pass
class TestMaskedLongitudeOperators(MaskedOperatorTests, LongitudeSetup):
pass
class TestMaskedArrayMethods(MaskedArraySetup):
def test_round(self):
# Goes via ufunc, hence easy.
mrc = self.mc.round()
expected = Masked(self.c.round(), self.mask_c)
assert_masked_equal(mrc, expected)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_sum(self, axis):
ma_sum = self.ma.sum(axis)
expected_data = self.a.sum(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_sum_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_sum = self.ma.sum(axis, where=where_final)
expected_data = self.ma.unmasked.sum(axis, where=where_final)
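        # The output is masked only where the effective 'where' leaves no
        # element to reduce over; masked elements are already excluded via
        # where_final, so the logical_or.reduce term adds nothing here.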
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_cumsum(self, axis):
ma_sum = self.ma.cumsum(axis)
expected_data = self.a.cumsum(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_mean(self, axis):
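        # The expected mean sums only the unmasked values and divides by
        # the number of unmasked elements; the result is masked only where
        # everything along the axis is masked.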
ma_mean = self.ma.mean(axis)
filled = self.a.copy()
filled[self.mask_a] = 0.
count = 1 - self.ma.mask.astype(int)
expected_data = filled.sum(axis) / count.sum(axis)
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
def test_mean_int16(self):
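        # As for plain ndarrays, integer input should be accumulated in,
        # and returned as, float64.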
ma = self.ma.astype('i2')
ma_mean = ma.mean()
assert ma_mean.dtype == 'f8'
expected = ma.astype('f8').mean()
assert_masked_equal(ma_mean, expected)
def test_mean_float16(self):
ma = self.ma.astype('f2')
ma_mean = ma.mean()
assert ma_mean.dtype == 'f2'
expected = self.ma.mean().astype('f2')
assert_masked_equal(ma_mean, expected)
def test_mean_inplace(self):
expected = self.ma.mean(1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.mean(1, out=out)
assert result is out
assert_masked_equal(out, expected)
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_mean_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_mean = self.ma.mean(axis, where=where)
expected_data = self.ma.unmasked.mean(axis, where=where_final)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_var(self, axis):
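        # Variance over the unmasked elements only: squared deviations at
        # masked positions are zeroed out and the sum is divided by the
        # unmasked count (less ddof, where given).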
ma_var = self.ma.var(axis)
filled = (self.a - self.ma.mean(axis, keepdims=True))**2
filled[self.mask_a] = 0.
count = (1 - self.ma.mask.astype(int)).sum(axis)
expected_data = filled.sum(axis) / count
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
ma_var1 = self.ma.var(axis, ddof=1)
expected_data1 = filled.sum(axis) / (count - 1)
expected_mask1 = self.ma.mask.all(axis) | (count <= 1)
assert_array_equal(ma_var1.unmasked, expected_data1)
assert_array_equal(ma_var1.mask, expected_mask1)
ma_var5 = self.ma.var(axis, ddof=5)
assert np.all(~np.isfinite(ma_var5.unmasked))
assert ma_var5.mask.all()
def test_var_int16(self):
ma = self.ma.astype('i2')
ma_var = ma.var()
assert ma_var.dtype == 'f8'
expected = ma.astype('f8').var()
assert_masked_equal(ma_var, expected)
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_var_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_var = self.ma.var(axis, where=where)
expected_data = self.ma.unmasked.var(axis, where=where_final)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
def test_std(self):
ma_std = self.ma.std(1, ddof=1)
ma_var1 = self.ma.var(1, ddof=1)
expected = np.sqrt(ma_var1)
assert_masked_equal(ma_std, expected)
def test_std_inplace(self):
expected = self.ma.std(1, ddof=1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.std(1, ddof=1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize('axis', (0, 1, None))
def test_std_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_std = self.ma.std(axis, where=where)
expected_data = self.ma.unmasked.std(axis, where=where_final)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)
assert_array_equal(ma_std.unmasked, expected_data)
assert_array_equal(ma_std.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_min(self, axis):
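        # Masked entries are effectively replaced by the global maximum,
        # so they can never be selected as the minimum.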
ma_min = self.ma.min(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.min(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert not np.any(ma_min.mask)
def test_min_with_masked_nan(self):
ma = Masked([3., np.nan, 2.], mask=[False, True, False])
ma_min = ma.min()
assert_array_equal(ma_min.unmasked, np.array(2.))
assert not ma_min.mask
@pytest.mark.parametrize('axis', (0, 1, None))
def test_min_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_min = self.ma.min(axis, where=where_final, initial=np.inf)
expected_data = self.ma.unmasked.min(axis, where=where_final, initial=np.inf)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert_array_equal(ma_min.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_max(self, axis):
ma_max = self.ma.max(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.max(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert not np.any(ma_max.mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_max_where(self, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_max = self.ma.max(axis, where=where_final, initial=-np.inf)
expected_data = self.ma.unmasked.max(axis, where=where_final, initial=-np.inf)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert_array_equal(ma_max.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argmin(self, axis):
ma_argmin = self.ma.argmin(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.argmin(axis)
assert_array_equal(ma_argmin, expected_data)
def test_argmin_only_one_unmasked_element(self):
# Regression test for example from @taldcroft at
# https://github.com/astropy/astropy/pull/11127#discussion_r600864559
ma = Masked(data=[1, 2], mask=[True, False])
assert ma.argmin() == 1
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argmax(self, axis):
ma_argmax = self.ma.argmax(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.argmax(axis)
assert_array_equal(ma_argmax, expected_data)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_argsort(self, axis):
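        # Filling masked values with something beyond the data maximum
        # makes them sort to the end, which is what argsort on a Masked
        # array is expected to do.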
ma_argsort = self.ma.argsort(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max() * 1.1
expected_data = filled.argsort(axis)
assert_array_equal(ma_argsort, expected_data)
@pytest.mark.parametrize('order', [None, 'a', ('a', 'b'), ('b', 'a')])
@pytest.mark.parametrize('axis', [0, 1])
def test_structured_argsort(self, axis, order):
ma_argsort = self.msa.argsort(axis, order=order)
filled = self.msa.filled(fill_value=np.array((np.inf, np.inf),
dtype=self.sdt))
expected_data = filled.argsort(axis, order=order)
assert_array_equal(ma_argsort, expected_data)
def test_argsort_error(self):
with pytest.raises(ValueError, match='when the array has no fields'):
self.ma.argsort(axis=0, order='a')
@pytest.mark.parametrize('axis', (0, 1))
def test_sort(self, axis):
ma_sort = self.ma.copy()
ma_sort.sort(axis)
indices = self.ma.argsort(axis)
expected_data = np.take_along_axis(self.ma.unmasked, indices, axis)
expected_mask = np.take_along_axis(self.ma.mask, indices, axis)
assert_array_equal(ma_sort.unmasked, expected_data)
assert_array_equal(ma_sort.mask, expected_mask)
@pytest.mark.parametrize('kth', [1, 3])
def test_argpartition(self, kth):
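        # Everything below position kth must compare smaller than the kth
        # element; since masked values sort last, a masked kth element
        # implies that all later elements are masked too.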
ma = self.ma.ravel()
ma_argpartition = ma.argpartition(kth)
partitioned = ma[ma_argpartition]
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
@pytest.mark.parametrize('kth', [1, 3])
def test_partition(self, kth):
partitioned = self.ma.flatten()
partitioned.partition(kth)
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
def test_all_explicit(self):
a1 = np.array([[1., 2.],
[3., 4.]])
a2 = np.array([[1., 0.],
[3., 4.]])
if self._data_cls is not np.ndarray:
a1 = self._data_cls(a1, self.a.unit)
a2 = self._data_cls(a2, self.a.unit)
ma1 = Masked(a1, mask=[[False, False],
[True, True]])
ma2 = Masked(a2, mask=[[False, True],
[False, True]])
ma1_eq_ma2 = ma1 == ma2
assert_array_equal(ma1_eq_ma2.unmasked, np.array([[True, False],
[True, True]]))
assert_array_equal(ma1_eq_ma2.mask, np.array([[False, True],
[True, True]]))
assert ma1_eq_ma2.all()
assert not (ma1 != ma2).all()
ma_eq1 = ma1_eq_ma2.all(1)
assert_array_equal(ma_eq1.mask, np.array([False, True]))
assert bool(ma_eq1[0]) is True
assert bool(ma_eq1[1]) is False
ma_eq0 = ma1_eq_ma2.all(0)
assert_array_equal(ma_eq0.mask, np.array([False, True]))
        assert bool(ma_eq0[0]) is True
        assert bool(ma_eq0[1]) is False
@pytest.mark.parametrize('method', ['any', 'all'])
@pytest.mark.parametrize('array,axis', [
('a', 0), ('a', 1), ('a', None),
('b', None),
('c', 0), ('c', 1), ('c', None)])
def test_all_and_any(self, array, axis, method):
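        # For these reductions, masked elements count as True for 'all'
        # and False for 'any', so they cannot change the outcome; the
        # result is masked only if everything along the axis is masked.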
ma = getattr(self, 'm'+array)
ma_eq = ma == ma
ma_all_or_any = getattr(ma_eq, method)(axis=axis)
filled = ma_eq.unmasked.copy()
filled[ma_eq.mask] = method == 'all'
a_all_or_any = getattr(filled, method)(axis=axis)
all_masked = ma.mask.all(axis)
assert_array_equal(ma_all_or_any.mask, all_masked)
assert_array_equal(ma_all_or_any.unmasked, a_all_or_any)
        # Check the interpretation as bool.
as_bool = [bool(a) for a in ma_all_or_any.ravel()]
expected = [bool(a) for a in (a_all_or_any & ~all_masked).ravel()]
assert as_bool == expected
def test_any_inplace(self):
ma_eq = self.ma == self.ma
expected = ma_eq.any(1)
out = Masked(np.zeros_like(expected.unmasked))
result = ma_eq.any(1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
@pytest.mark.parametrize('method', ('all', 'any'))
@pytest.mark.parametrize('axis', (0, 1, None))
def test_all_and_any_where(self, method, axis):
where = np.array([
[True, False, False, ],
[True, True, True, ],
])
where_final = ~self.ma.mask & where
ma_eq = self.ma == self.ma
ma_any = getattr(ma_eq, method)(axis, where=where)
expected_data = getattr(ma_eq.unmasked, method)(axis, where=where_final)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)
assert_array_equal(ma_any.unmasked, expected_data)
assert_array_equal(ma_any.mask, expected_mask)
@pytest.mark.parametrize('offset', (0, 1))
def test_diagonal(self, offset):
mda = self.ma.diagonal(offset=offset)
expected = Masked(self.a.diagonal(offset=offset),
self.mask_a.diagonal(offset=offset))
assert_masked_equal(mda, expected)
@pytest.mark.parametrize('offset', (0, 1))
def test_trace(self, offset):
mta = self.ma.trace(offset=offset)
expected = Masked(self.a.trace(offset=offset),
self.mask_a.trace(offset=offset, dtype=bool))
assert_masked_equal(mta, expected)
def test_clip(self):
maclip = self.ma.clip(self.b, self.c)
expected = Masked(self.a.clip(self.b, self.c), self.mask_a)
assert_masked_equal(maclip, expected)
def test_clip_masked_min_max(self):
maclip = self.ma.clip(self.mb, self.mc)
# Need to be careful with min, max because of Longitude, which wraps.
dmax = np.maximum(np.maximum(self.a, self.b), self.c).max()
dmin = np.minimum(np.minimum(self.a, self.b), self.c).min()
expected = Masked(self.a.clip(self.mb.filled(dmin),
self.mc.filled(dmax)),
mask=self.mask_a)
assert_masked_equal(maclip, expected)
class TestMaskedQuantityMethods(TestMaskedArrayMethods, QuantitySetup):
pass
class TestMaskedLongitudeMethods(TestMaskedArrayMethods, LongitudeSetup):
pass
class TestMaskedArrayProductMethods(MaskedArraySetup):
# These cannot work on Quantity, so done separately
@pytest.mark.parametrize('axis', (0, 1, None))
def test_prod(self, axis):
ma_sum = self.ma.prod(axis)
expected_data = self.a.prod(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_cumprod(self, axis):
ma_sum = self.ma.cumprod(axis)
expected_data = self.a.cumprod(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
def test_masked_str_explicit():
sa = np.array([(1., 2.), (3., 4.)], dtype='f8,f8')
msa = Masked(sa, [(False, True), (False, False)])
assert str(msa) == "[(1., ——) (3., 4.)]"
assert str(msa[0]) == "(1., ——)"
assert str(msa[1]) == "(3., 4.)"
with np.printoptions(precision=3, floatmode='fixed'):
assert str(msa) == "[(1.000, ———) (3.000, 4.000)]"
def test_masked_repr_explicit():
# Use explicit endianness to ensure tests pass on all architectures
sa = np.array([(1., 2.), (3., 4.)], dtype='>f8,>f8')
msa = Masked(sa, [(False, True), (False, False)])
assert repr(msa) == ("MaskedNDArray([(1., ——), (3., 4.)], "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
assert repr(msa[0]) == ("MaskedNDArray((1., ——), "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
assert repr(msa[1]) == ("MaskedNDArray((3., 4.), "
"dtype=[('f0', '>f8'), ('f1', '>f8')])")
def test_masked_repr_summary():
ma = Masked(np.arange(15.), mask=[True]+[False]*14)
with np.printoptions(threshold=2):
assert repr(ma) == (
"MaskedNDArray([———, 1., 2., ..., 12., 13., 14.])")
def test_masked_repr_nodata():
assert repr(Masked([])) == "MaskedNDArray([], dtype=float64)"
class TestMaskedArrayRepr(MaskedArraySetup):
def test_array_str(self):
        # Very blunt check that they work at all.
str(self.ma)
str(self.mb)
str(self.mc)
str(self.msa)
str(self.msb)
def test_scalar_str(self):
assert self.mb[0].shape == ()
str(self.mb[0])
assert self.msb[0].shape == ()
str(self.msb[0])
def test_array_repr(self):
repr(self.ma)
repr(self.mb)
repr(self.mc)
repr(self.msa)
repr(self.msb)
def test_scalar_repr(self):
repr(self.mb[0])
repr(self.msb[0])
class TestMaskedQuantityRepr(TestMaskedArrayRepr, QuantitySetup):
pass
class TestMaskedRecarray(MaskedArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ra = self.sa.view(np.recarray)
self.mra = Masked(self.ra, mask=self.mask_sa)
def test_recarray_setup(self):
assert isinstance(self.mra, Masked)
assert isinstance(self.mra, np.recarray)
assert np.all(self.mra.unmasked == self.ra)
assert np.all(self.mra.mask == self.mask_sa)
assert_array_equal(self.mra.view(np.ndarray), self.sa)
assert isinstance(self.mra.a, Masked)
assert_array_equal(self.mra.a.unmasked, self.sa['a'])
assert_array_equal(self.mra.a.mask, self.mask_sa['a'])
def test_recarray_setting(self):
mra = self.mra.copy()
mra.a = self.msa['b']
assert_array_equal(mra.a.unmasked, self.msa['b'].unmasked)
assert_array_equal(mra.a.mask, self.msa['b'].mask)
@pytest.mark.parametrize('attr', [0, 'a'])
def test_recarray_field_getting(self, attr):
mra_a = self.mra.field(attr)
assert isinstance(mra_a, Masked)
assert_array_equal(mra_a.unmasked, self.sa['a'])
assert_array_equal(mra_a.mask, self.mask_sa['a'])
@pytest.mark.parametrize('attr', [0, 'a'])
def test_recarray_field_setting(self, attr):
mra = self.mra.copy()
mra.field(attr, self.msa['b'])
assert_array_equal(mra.a.unmasked, self.msa['b'].unmasked)
assert_array_equal(mra.a.mask, self.msa['b'].mask)
class TestMaskedArrayInteractionWithNumpyMA(MaskedArraySetup):
def test_masked_array_from_masked(self):
"""Check that we can initialize a MaskedArray properly."""
np_ma = np.ma.MaskedArray(self.ma)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
def test_view_as_masked_array(self):
"""Test that we can be viewed as a MaskedArray."""
np_ma = self.ma.view(np.ma.MaskedArray)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
class TestMaskedQuantityInteractionWithNumpyMA(
TestMaskedArrayInteractionWithNumpyMA, QuantitySetup):
pass
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test all functions covered by __array_function__.
Here, run through all functions, with simple tests just to check the helpers.
More complicated tests of functionality, including with subclasses, are done
in test_functions.
TODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)
- np.linalg
- np.fft (is there any point?)
- np.lib.nanfunctions
"""
import inspect
import itertools
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_20, NUMPY_LT_1_23
from astropy.units.tests.test_quantity_non_ufuncs import (
get_wrapped_functions)
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.masked.function_helpers import (
MASKED_SAFE_FUNCTIONS,
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
IGNORED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
from .test_masked import assert_masked_equal, MaskedArraySetup
all_wrapped_functions = get_wrapped_functions(np)
all_wrapped = set(all_wrapped_functions.values())
class BasicTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = Masked(func(self.a, *args, **kwargs),
mask=func(self.mask_a, *args, **kwargs))
assert_masked_equal(out, expected)
def check2(self, func, *args, **kwargs):
out = func(self.ma, self.mb, *args, **kwargs)
expected = Masked(func(self.a, self.b, *args, **kwargs),
mask=func(self.mask_a, self.mask_b, *args, **kwargs))
if isinstance(out, (tuple, list)):
for o, x in zip(out, expected):
assert_masked_equal(o, x)
else:
assert_masked_equal(out, expected)
class NoMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
class InvariantMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, self.mask_a)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.ma) == (2, 3)
def test_size(self):
assert np.size(self.ma) == 6
def test_ndim(self):
assert np.ndim(self.ma) == 2
class TestShapeManipulation(BasicTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (6, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
self.check(np.atleast_1d)
o, so = np.atleast_1d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)
def test_atleast_2d(self):
self.check(np.atleast_2d)
o, so = np.atleast_2d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)
def test_atleast_3d(self):
self.check(np.atleast_3d)
o, so = np.atleast_3d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.mc)
assert o.shape == o.mask.shape == (2,)
assert_array_equal(o.unmasked, self.c.squeeze())
assert_array_equal(o.mask, self.mask_c.squeeze())
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
self.check(np.broadcast_to, (3, 2, 3))
self.check(np.broadcast_to, (3, 2, 3), subok=False)
def test_broadcast_arrays(self):
self.check2(np.broadcast_arrays)
self.check2(np.broadcast_arrays, subok=False)
class TestArgFunctions(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.)
@pytest.mark.filterwarnings('ignore:Calling nonzero on 0d arrays is deprecated')
def test_nonzero_0d(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], np.ones(()).nonzero()[0])
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], np.zeros(()).nonzero()[0])
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.)
class TestAlongAxis(MaskedArraySetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
out = np.take_along_axis(self.ma, indices, axis=0)
expected = np.take_along_axis(self.a, indices, axis=0)
expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_put_along_axis(self):
ma = self.ma.copy()
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
np.put_along_axis(ma, indices, axis=0, values=-1)
expected = self.a.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, self.mask_a)
np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)
assert_array_equal(ma.unmasked, expected)
expected_mask = self.mask_a.copy()
np.put_along_axis(expected_mask, indices, axis=0, values=True)
assert_array_equal(ma.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.ma)
expected = np.apply_along_axis(np.square, axis, self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.parametrize('axes', [(1,), 0, (0, -1)])
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.mean(np.square(x), axis)
out = np.apply_over_axes(function, self.ma, axes)
expected = self.ma
for axis in (axes if isinstance(axes, tuple) else (axes,)):
expected = (expected**2).mean(axis, keepdims=True)
assert_array_equal(out.unmasked, expected.unmasked)
assert_array_equal(out.mask, expected.mask)
def test_apply_over_axes_no_reduction(self):
out = np.apply_over_axes(np.cumsum, self.ma, 0)
expected = self.ma.cumsum(axis=0)
assert_masked_equal(out, expected)
def test_apply_over_axes_wrong_size(self):
with pytest.raises(ValueError, match='not.*correct shape'):
np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)
class TestIndicesFrom(NoMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.arange(9).reshape(3, 3)
self.mask_a = np.eye(3, dtype=bool)
self.ma = Masked(self.a, self.mask_a)
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.array([1+2j, 3+4j])
self.mask_a = np.array([True, False])
self.ma = Masked(self.a, mask=self.mask_a)
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.ma)
assert_array_equal(farray, self.ma)
class TestArrayCreation(MaskedArraySetup):
def test_empty_like(self):
o = np.empty_like(self.ma)
assert o.shape == (2, 3)
assert isinstance(o, Masked)
assert isinstance(o, np.ndarray)
o2 = np.empty_like(prototype=self.ma)
assert o2.shape == (2, 3)
assert isinstance(o2, Masked)
assert isinstance(o2, np.ndarray)
o3 = np.empty_like(self.ma, subok=False)
assert type(o3) is MaskedNDArray
def test_zeros_like(self):
o = np.zeros_like(self.ma)
assert_array_equal(o.unmasked, np.zeros_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.zeros_like(a=self.ma)
assert_array_equal(o2.unmasked, np.zeros_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
def test_ones_like(self):
o = np.ones_like(self.ma)
assert_array_equal(o.unmasked, np.ones_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.ones_like(a=self.ma)
assert_array_equal(o2.unmasked, np.ones_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
@pytest.mark.parametrize('value', [0.5, Masked(0.5, mask=True), np.ma.masked])
def test_full_like(self, value):
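        # np.ma.masked as a fill value should mask every element (the data
        # itself is not checked); a Masked fill value sets both the data
        # and the mask.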
o = np.full_like(self.ma, value)
if value is np.ma.masked:
expected = Masked(o.unmasked, True)
else:
expected = Masked(np.empty_like(self.a))
expected[...] = value
assert_array_equal(o.unmasked, expected.unmasked)
assert_array_equal(o.mask, expected.mask)
class TestAccessingParts(BasicTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
ma = self.ma.ravel()
o = np.diag(ma)
assert_array_equal(o.unmasked, np.diag(self.a.ravel()))
assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False], self.ma, axis=0)
expected = np.compress([True, False], self.a, axis=0)
expected_mask = np.compress([True, False], self.mask_a, axis=0)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_extract(self):
o = np.extract([True, False, True], self.ma)
expected = np.extract([True, False, True], self.a)
expected_mask = np.extract([True, False, True], self.mask_a)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(MaskedArraySetup):
def test_put(self):
ma = self.ma.copy()
v = Masked([50, 150], [False, True])
np.put(ma, [0, 2], v)
expected = self.a.copy()
np.put(expected, [0, 2], [50, 150])
expected_mask = self.mask_a.copy()
np.put(expected_mask, [0, 2], [False, True])
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
# Indices cannot be masked.
np.put(ma, Masked([0, 2]), v)
with pytest.raises(TypeError):
# Array to put masked values in must be masked.
np.put(self.a.copy(), [0, 2], v)
def test_putmask(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(np.arange(100, 650, 100),
mask=[False, True, True, True, False, False])
np.putmask(ma, mask, values)
expected = self.a.flatten()
np.putmask(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.putmask(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.putmask(self.a.flatten(), mask, values)
def test_place(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked([100, 200], mask=[False, True])
np.place(ma, mask, values)
expected = self.a.flatten()
np.place(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.place(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.place(self.a.flatten(), mask, values)
def test_copyto(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(np.arange(100, 650, 100),
mask=[False, True, True, True, False, False])
np.copyto(ma, values, where=mask)
expected = self.a.flatten()
np.copyto(expected, values.unmasked, where=mask)
expected_mask = self.mask_a.flatten()
np.copyto(expected_mask, values.mask, where=mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.copyto(self.a.flatten(), values, where=mask)
@pytest.mark.parametrize('value', [0.25, np.ma.masked])
def test_fill_diagonal(self, value):
ma = self.ma[:2, :2].copy()
np.fill_diagonal(ma, value)
expected = ma.copy()
expected[np.diag_indices_from(expected)] = value
assert_array_equal(ma.unmasked, expected.unmasked)
assert_array_equal(ma.mask, expected.mask)
class TestRepeat(BasicTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(MaskedArraySetup):
# More tests at TestMaskedArrayConcatenation in test_functions.
def check(self, func, *args, **kwargs):
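        # Data and mask are expected to be concatenated independently;
        # plain ndarrays in the input list count as fully unmasked.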
ma_list = kwargs.pop('ma_list', [self.ma, self.ma])
a_list = [Masked(ma).unmasked for ma in ma_list]
m_list = [Masked(ma).mask for ma in ma_list]
o = func(ma_list, *args, **kwargs)
expected = func(a_list, *args, **kwargs)
expected_mask = func(m_list, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, ma_list=[self.a, self.ma])
out = Masked(np.empty((4, 3)))
result = np.concatenate([self.ma, self.ma], out=out)
assert out is result
expected = np.concatenate([self.a, self.a])
expected_mask = np.concatenate([self.mask_a, self.mask_a])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))
def test_stack(self):
self.check(np.stack)
def test_column_stack(self):
self.check(np.column_stack)
def test_hstack(self):
self.check(np.hstack)
def test_vstack(self):
self.check(np.vstack)
def test_dstack(self):
self.check(np.dstack)
def test_block(self):
self.check(np.block)
out = np.block([[0., Masked(1., True)],
[Masked(1, False), Masked(2, False)]])
expected = np.array([[0, 1.], [1, 2]])
expected_mask = np.array([[False, True], [False, False]])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_append(self):
out = np.append(self.ma, self.mc, axis=1)
expected = np.append(self.a, self.c, axis=1)
expected_mask = np.append(self.mask_a, self.mask_c, axis=1)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_insert(self):
obj = (1, 1)
values = Masked([50., 25.], mask=[True, False])
out = np.insert(self.ma.flatten(), obj, values)
expected = np.insert(self.a.flatten(), obj, [50., 25.])
expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.insert(self.a.flatten(), obj, values)
with pytest.raises(TypeError):
np.insert(self.ma.flatten(), Masked(obj), values)
class TestSplit:
@classmethod
def setup_class(self):
self.a = np.arange(54.).reshape(3, 3, 6)
self.mask_a = np.zeros(self.a.shape, dtype=bool)
self.mask_a[1, 1, 1] = True
self.mask_a[0, 1, 4] = True
self.mask_a[1, 2, 5] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
expected_mask = func(self.mask_a, *args, **kwargs)
assert len(out) == len(expected)
for o, x, xm in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, xm)
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestMethodLikes(MaskedArraySetup):
def check(self, function, *args, method=None, **kwargs):
if method is None:
method = function.__name__
o = function(self.ma, *args, **kwargs)
x = getattr(self.ma, method)(*args, **kwargs)
assert_masked_equal(o, x)
def test_amax(self):
self.check(np.amax, method='max')
def test_amin(self):
self.check(np.amin, method='min')
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
self.check(np.any)
def test_all(self):
self.check(np.all)
def test_sometrue(self):
self.check(np.sometrue, method='any')
def test_alltrue(self):
self.check(np.alltrue, method='all')
def test_prod(self):
self.check(np.prod)
def test_product(self):
self.check(np.product, method='prod')
def test_cumprod(self):
self.check(np.cumprod)
def test_cumproduct(self):
self.check(np.cumproduct, method='cumprod')
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_, method='round')
def test_around(self):
self.check(np.around, method='round')
def test_clip(self):
self.check(np.clip, 2., 4.)
self.check(np.clip, self.mb, self.mc)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
self.check(np.var)
class TestUfuncLike(InvariantMaskTestSetup):
def test_fix(self):
self.check(np.fix)
def test_angle(self):
a = np.array([1+0j, 0+1j, 1+1j, 0+0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.angle(ma)
expected = np.angle(ma.unmasked)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_i0(self):
self.check(np.i0)
def test_sinc(self):
self.check(np.sinc)
def test_where(self):
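        # The output mask comes from whichever operand was selected; a
        # masked condition element masks the output as well (second case
        # below).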
mask = [True, False, True]
out = np.where(mask, self.ma, 1000.)
expected = np.where(mask, self.a, 1000.)
expected_mask = np.where(mask, self.mask_a, False)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
mask2 = Masked(mask, [True, False, False])
out2 = np.where(mask2, self.ma, 1000.)
expected2 = np.where(mask, self.a, 1000.)
expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
assert_array_equal(out2.unmasked, expected2)
assert_array_equal(out2.mask, expected_mask2)
def test_where_single_arg(self):
m = Masked(np.arange(3), mask=[True, False, False])
out = np.where(m)
expected = m.nonzero()
assert isinstance(out, tuple) and len(out) == 1
assert_array_equal(out[0], expected[0])
def test_where_wrong_number_of_arg(self):
with pytest.raises(ValueError, match='either both or neither'):
np.where([True, False, False], self.a)
def test_choose(self):
a = np.array([0, 1]).reshape((2, 1))
result = np.choose(a, (self.ma, self.mb))
expected = np.choose(a, (self.a, self.b))
expected_mask = np.choose(a, (self.mask_a, self.mask_b))
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.choose(a, (self.ma, self.mb), out=out)
assert result2 is out
assert_array_equal(result2, result)
with pytest.raises(TypeError):
np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))
def test_choose_masked(self):
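        # A masked index is treated as 0 when picking the data, but the
        # corresponding output element is masked regardless of what was
        # picked.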
ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
out = ma.choose((self.ma, self.mb))
expected = np.choose(ma.filled(0), (self.a, self.b))
expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(ValueError):
ma.unmasked.choose((self.ma, self.mb))
@pytest.mark.parametrize('default', [-1., np.ma.masked, Masked(-1, mask=True)])
def test_select(self, default):
a, mask_a, ma = self.a, self.mask_a, self.ma
out = np.select([a < 1.5, a > 3.5], [ma, ma+1], default=default)
expected = np.select([a < 1.5, a > 3.5], [a, a+1],
default=-1 if default is not np.ma.masked else 0)
expected_mask = np.select([a < 1.5, a > 3.5], [mask_a, mask_a],
default=getattr(default, 'mask', False))
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_real_if_close(self):
a = np.array([1+0j, 0+1j, 1+1j, 0+0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.real_if_close(ma)
expected = np.real_if_close(a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_tril(self):
self.check(np.tril)
def test_triu(self):
self.check(np.triu)
def test_unwrap(self):
self.check(np.unwrap)
def test_nan_to_num(self):
self.check(np.nan_to_num)
ma = Masked([np.nan, 1.], mask=[True, False])
o = np.nan_to_num(ma, copy=False)
assert_masked_equal(o, Masked([0., 1.], mask=[True, False]))
assert ma is o
class TestUfuncLikeTests:
@classmethod
def setup_class(self):
self.a = np.array([[-np.inf, +np.inf, np.nan, 3., 4.]]*2)
self.mask_a = np.array([[False]*5, [True]*4+[False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([[3.0001], [3.9999]])
self.mask_b = np.array([[True], [False]])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, func):
out = func(self.ma)
expected = func(self.a)
assert type(out) is MaskedNDArray
assert out.dtype.kind == 'b'
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
assert not np.may_share_memory(out.mask, self.mask_a)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
o = np.isreal(Masked([1. + 1j], mask=False))
assert not o.unmasked and not o.mask
o = np.isreal(Masked([1. + 1j], mask=True))
assert not o.unmasked and o.mask
def test_iscomplex(self):
self.check(np.iscomplex)
o = np.iscomplex(Masked([1. + 1j], mask=False))
assert o.unmasked and not o.mask
o = np.iscomplex(Masked([1. + 1j], mask=True))
assert o.unmasked and o.mask
def test_isclose(self):
out = np.isclose(self.ma, self.mb, atol=0.01)
        expected = np.isclose(self.a, self.b, atol=0.01)
expected_mask = self.mask_a | self.mask_b
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_allclose(self):
out = np.allclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb,
atol=0.01)[self.mask_a | self.mask_b].all()
assert_array_equal(out, expected)
def test_array_equal(self):
assert not np.array_equal(self.ma, self.ma)
assert not np.array_equal(self.ma, self.a)
if not NUMPY_LT_1_19:
assert np.array_equal(self.ma, self.ma, equal_nan=True)
assert np.array_equal(self.ma, self.a, equal_nan=True)
assert not np.array_equal(self.ma, self.mb)
ma2 = self.ma.copy()
ma2.mask |= np.isnan(self.a)
assert np.array_equal(ma2, self.ma)
def test_array_equiv(self):
assert np.array_equiv(self.mb, self.mb)
assert np.array_equiv(self.mb, self.b)
assert not np.array_equiv(self.ma, self.mb)
assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))
class TestOuterLikeFunctions(MaskedArraySetup):
def test_outer(self):
result = np.outer(self.ma, self.mb)
expected_data = np.outer(self.a.ravel(), self.b.ravel())
expected_mask = np.logical_or.outer(self.mask_a.ravel(),
self.mask_b.ravel())
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.outer(self.ma, self.mb, out=out)
assert result2 is out
assert result2 is not result
assert_masked_equal(result2, result)
out2 = np.zeros_like(result.unmasked)
with pytest.raises(TypeError):
np.outer(self.ma, self.mb, out=out2)
def test_kron(self):
result = np.kron(self.ma, self.mb)
expected_data = np.kron(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a,
self.mask_b).reshape(result.shape)
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
class TestReductionLikeFunctions(MaskedArraySetup):
def test_average(self):
o = np.average(self.ma)
assert_masked_equal(o, self.ma.mean())
o = np.average(self.ma, weights=self.mb, axis=-1)
expected = np.average(self.a, weights=self.b, axis=-1)
expected_mask = (self.mask_a | self.mask_b).any(-1)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_trace(self):
o = np.trace(self.ma)
expected = np.trace(self.a)
expected_mask = np.trace(self.mask_a).astype(bool)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
@pytest.mark.parametrize('axis', [0, 1, None])
def test_count_nonzero(self, axis):
o = np.count_nonzero(self.ma, axis=axis)
expected = np.count_nonzero(self.ma.filled(0), axis=axis)
assert_array_equal(o, expected)
@pytest.mark.filterwarnings('ignore:all-nan')
class TestPartitionLikeFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(36.).reshape(6, 6)
self.mask_a = np.zeros_like(self.a, bool)
        # Fill the lower triangle (including the diagonal) on purpose, so
        # that some slices consist entirely of masked elements.
self.mask_a[np.tril_indices_from(self.a)] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, function, *args, **kwargs):
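        # Compare against the corresponding nan-function applied to the
        # data with masked elements replaced by NaN: NaN is ignored much
        # like a masked element, and positions where the nan-function
        # itself returns NaN should come out masked.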
o = function(self.ma, *args, **kwargs)
nanfunc = getattr(np, 'nan'+function.__name__)
nanfilled = self.ma.filled(np.nan)
expected = nanfunc(nanfilled, *args, **kwargs)
assert_array_equal(o.filled(np.nan), expected)
assert_array_equal(o.mask, np.isnan(expected))
if not kwargs.get('axis', 1):
            # No need to test the out= argument for every axis.
return
out = np.zeros_like(o)
o2 = function(self.ma, *args, out=out, **kwargs)
assert o2 is out
assert_masked_equal(o2, o)
with pytest.raises(TypeError):
function(self.ma, *args, out=np.zeros_like(expected), **kwargs)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_median(self, axis):
self.check(np.median, axis=axis)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_quantile(self, axis):
self.check(np.quantile, q=[0.25, 0.5], axis=axis)
def test_quantile_out_of_range(self):
with pytest.raises(ValueError, match='must be in the range'):
np.quantile(self.ma, q=1.5)
@pytest.mark.parametrize('axis', [None, 0, 1])
def test_percentile(self, axis):
self.check(np.percentile, q=50, axis=axis)
class TestIntDiffFunctions(MaskedArraySetup):
def test_diff(self):
out = np.diff(self.ma)
expected = np.diff(self.a)
expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_diff_prepend_append(self):
out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)
expected = np.diff(self.a, prepend=-1, append=1.)
mask = np.concatenate([np.ones((2, 1), bool),
self.mask_a,
np.zeros((2, 1), bool)], axis=-1)
expected_mask = mask[:, 1:] | mask[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_trapz(self):
ma = self.ma.copy()
ma.mask[1] = False
out = np.trapz(ma)
assert_array_equal(out.unmasked, np.trapz(self.a))
assert_array_equal(out.mask, np.array([True, False]))
def test_gradient(self):
out = np.gradient(self.ma)
expected = np.gradient(self.a)
expected_mask = [(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),
np.stack([
self.mask_a[:, 0] | self.mask_a[:, 1],
self.mask_a[:, 0] | self.mask_a[:, 2],
self.mask_a[:, 1] | self.mask_a[:, 2]], axis=-1)]
for o, x, m in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestSpaceFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(1., 7.).reshape(2, 3)
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([2.5, 10., 3.])
self.mask_b = np.array([False, True, False])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, *args, **kwargs):
out = function(self.ma, self.mb, 5)
expected = function(self.a, self.b, 5)
expected_mask = np.broadcast_to(self.mask_a | self.mask_b,
expected.shape).copy()
        # TODO: should the implementation also ensure that the start-point
        # mask is determined just by the start point (as it is for
        # geomspace in numpy >= 1.20)?
expected_mask[-1] = self.mask_b
if not NUMPY_LT_1_20 and function is np.geomspace:
expected_mask[0] = self.mask_a
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_linspace(self):
self.check(np.linspace, 5)
def test_logspace(self):
self.check(np.logspace, 10)
def test_geomspace(self):
self.check(np.geomspace, 5)
class TestInterpolationFunctions(MaskedArraySetup):
def test_interp(self):
xp = np.arange(5.)
fp = np.array([1., 5., 6., 19., 20.])
mask_fp = np.array([False, False, False, True, False])
mfp = Masked(fp, mask=mask_fp)
x = np.array([1.5, 17.])
mask_x = np.array([False, True])
mx = Masked(x, mask=mask_x)
out = np.interp(mx, xp, mfp)
expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_x)
def test_piecewise(self):
condlist = [self.a < 1, self.a >= 1]
out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.])
expected = np.piecewise(self.a, condlist, [-1, 1.])
expected_mask = np.piecewise(self.mask_a, condlist, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
condlist2 = [self.a < 1, self.a >= 3]
out2 = np.piecewise(self.ma, condlist2,
[Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.),
mask=~x.mask)])
expected = np.piecewise(self.a, condlist2, [-1, 1, 2])
expected_mask = np.piecewise(self.mask_a, condlist2,
[True, False, lambda x: ~x])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
with pytest.raises(ValueError, match='with 2 condition'):
np.piecewise(self.ma, condlist2, [])
def test_regression_12978(self):
"""Regression tests for https://github.com/astropy/astropy/pull/12978"""
# This case produced incorrect results
mask = [False, True, False]
x = np.array([1, 2, 3])
xp = Masked(np.array([1, 2, 3]), mask=mask)
fp = Masked(np.array([1, 2, 3]), mask=mask)
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
# This case raised a ValueError
xp = np.array([1, 3])
fp = Masked(np.array([1, 3]))
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
class TestBincount(MaskedArraySetup):
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
mask_i = np.array([True, False, False, True, False, False])
mi = Masked(i, mask=mask_i)
out = np.bincount(mi)
expected = np.bincount(i[~mask_i])
assert_array_equal(out, expected)
w = np.arange(len(i))
mask_w = np.array([True]+[False]*5)
mw = Masked(w, mask=mask_w)
out2 = np.bincount(i, mw)
expected = np.bincount(i, w)
expected_mask = np.array([False, True, False, False, False])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
out3 = np.bincount(mi, mw)
expected = np.bincount(i[~mask_i], w[~mask_i])
expected_mask = np.array([False, False, False, False, False])
assert_array_equal(out3.unmasked, expected)
assert_array_equal(out3.mask, expected_mask)
class TestSortFunctions(MaskedArraySetup):
def test_sort(self):
o = np.sort(self.ma)
expected = self.ma.copy()
expected.sort()
assert_masked_equal(o, expected)
def test_sort_complex(self):
ma = Masked(np.array([1+2j, 0+4j, 3+0j, -1-1j]),
mask=[True, False, False, False])
o = np.sort_complex(ma)
indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))
expected = ma[indx]
assert_masked_equal(o, expected)
def test_msort(self):
o = np.msort(self.ma)
expected = np.sort(self.ma, axis=0)
assert_masked_equal(o, expected)
def test_partition(self):
o = np.partition(self.ma, 1)
expected = self.ma.copy()
expected.partition(1)
assert_masked_equal(o, expected)
class TestStringFunctions:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(self):
self.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == '[— 1 2]'
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=', ')
assert out1 == '[—, 1, 2]'
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=', ', formatter={'all': hex})
assert out2 == '[———, 0x1, 0x2]'
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(self.ma, None, None, None, ', ', '',
np._NoValue, {'int': hex})
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=', ', formatter={'float': hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == 'MaskedNDArray([—, 1, 2])'
ma2 = self.ma.astype('f4')
out2 = np.array_repr(ma2)
assert out2 == 'MaskedNDArray([——, 1., 2.], dtype=float32)'
def test_array_str(self):
out = np.array_str(self.ma)
assert out == '[— 1 2]'
class TestBitFunctions:
@classmethod
def setup_class(self):
self.a = np.array([15, 255, 0], dtype='u1')
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.unpackbits(self.a).reshape(6, 4)
self.mask_b = np.array([False]*15 + [True, True] + [False]*7).reshape(6, 4)
self.mb = Masked(self.b, mask=self.mask_b)
@pytest.mark.parametrize('axis', [None, 1, 0])
def test_packbits(self, axis):
out = np.packbits(self.mb, axis=axis)
if axis is None:
expected = self.a
else:
expected = np.packbits(self.b, axis=axis)
expected_mask = np.packbits(self.mask_b, axis=axis) > 0
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_unpackbits(self):
out = np.unpackbits(self.ma)
mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
expected_mask = np.unpackbits(mask) > 0
assert_array_equal(out.unmasked, self.b.ravel())
assert_array_equal(out.mask, expected_mask)
class TestIndexFunctions(MaskedArraySetup):
"""Does not seem much sense to support these..."""
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.ma, 3)
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.ma,), 3)
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.ma)
class TestDtypeFunctions(MaskedArraySetup):
def check(self, function, *args, **kwargs):
out = function(self.ma, *args, **kwargs)
expected = function(self.a, *args, **kwargs)
assert out == expected
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.a.dtype)
self.check(np.can_cast, 'f4')
def test_min_scalar_type(self):
out = np.min_scalar_type(self.ma[0, 0])
expected = np.min_scalar_type(self.a[0, 0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1., 4.)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10., 3., 4.])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestMemoryFunctions(MaskedArraySetup):
def test_shares_memory(self):
assert np.shares_memory(self.ma, self.ma.unmasked)
assert not np.shares_memory(self.ma, self.ma.mask)
def test_may_share_memory(self):
assert np.may_share_memory(self.ma, self.ma.unmasked)
assert not np.may_share_memory(self.ma, self.ma.mask)
class TestDatetimeFunctions:
# Could in principle support np.is_busday, np.busday_count, np.busday_offset.
@classmethod
def setup_class(self):
self.a = np.array(['2020-12-31', '2021-01-01', '2021-01-02'], dtype='M')
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([['2021-01-07'], ['2021-01-31']], dtype='M')
self.mask_b = np.array([[False], [True]])
self.mb = Masked(self.b, mask=self.mask_b)
def test_datetime_as_string(self):
out = np.datetime_as_string(self.ma)
expected = np.datetime_as_string(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.filterwarnings('ignore:all-nan')
class TestNaNFunctions:
def setup_class(self):
self.a = np.array([[np.nan, np.nan, 3.],
[4., 5., 6.]])
self.mask_a = np.array([[True, False, False],
[False, True, False]])
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True,
**kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
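    # Both helpers rely on masked entries behaving like NaN: filling the
    # mask with NaN lets numpy's own nan* implementations serve as the
    # reference for the values, the mask, and the 'out' argument handling.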
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
self.check(np.nanpercentile, q=50)
untested_functions = set()
if NUMPY_LT_1_20:
financial_functions = {f for f in all_wrapped_functions.values()
if f in np.lib.financial.__dict__.values()}
untested_functions |= financial_functions
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander
}
untested_functions |= poly_functions
# Get covered functions
tested_functions = set()
for cov_cls in list(filter(inspect.isclass, locals().values())):
for k, v in cov_cls.__dict__.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
tested_functions.add(all_wrapped_functions[f])
def test_basic_testing_completeness():
assert all_wrapped == (tested_functions
| IGNORED_FUNCTIONS
| UNSUPPORTED_FUNCTIONS)
@pytest.mark.xfail(reason='coverage not completely set up yet')
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped == (tested_functions | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize('one, two', itertools.combinations(
(MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys())), 2))
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (MASKED_SAFE_FUNCTIONS |
UNSUPPORTED_FUNCTIONS |
set(APPLY_TO_BOTH_FUNCTIONS.keys()) |
set(DISPATCHED_FUNCTIONS.keys()))
assert all_wrapped == included_in_helpers
@pytest.mark.xfail(reason='coverage not completely set up yet')
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
|
3e48d52f2c60cabc657386b55e64e5ae5321988b6a2056ea58790dd09c7332a9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import partial
from collections import defaultdict
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
import astropy.units as u
from astropy.coordinates import SkyCoord, BaseCoordinateFrame
from astropy.utils import minversion
from astropy.utils.compat.optional_deps import HAS_PIL
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from .transforms import CoordinateTransform
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta, transform_contour_set_inplace
from .frame import RectangularFrame, RectangularFrame1D
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ['WCSAxes', 'WCSAxesSubplot']
VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way."""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
rect : list
The position of the axes in the figure in relative units. Should be
given as ``[left, bottom, width, height]``.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
"""
def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
transData=None, slices=None, frame_class=None,
**kwargs):
"""
"""
super().__init__(fig, rect, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif (wcs is not None and (wcs.pixel_n_dim == 1 or
(slices is not None and 'y' not in slices))):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
        if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(coord.format_coord(world[coord.coord_index], format='ascii'))
coord_string = ' '.join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == 'w':
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
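    # Pressing 'w' therefore cycles the interactive status-bar readout from
    # the main world coordinates through each overlay in turn, and finally
    # to raw pixel coordinates (index -1, handled in _display_world_coords).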
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.pop('origin', 'lower')
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = 'lower'
elif origin == 'upper':
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
if HAS_PIL:
from PIL.Image import Image
if minversion('PIL', '9.1'):
from PIL.Image import Transpose
FLIP_TOP_BOTTOM = Transpose.FLIP_TOP_BOTTOM
else:
from PIL.Image import FLIP_TOP_BOTTOM
if isinstance(X, Image) or hasattr(X, 'getpixel'):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
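    # Illustrative calls, assuming ``ax`` is a WCSAxes and ``data`` a 2-D
    # array:
    #
    #     ax.imshow(data, cmap='gray')     # origin defaults to 'lower'
    #     ax.imshow(data, origin='upper')  # raises ValueError (see above)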
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop('transform', None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
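    # Sketch of the intended use, assuming ``other_wcs`` is a hypothetical
    # WCS describing the grid of ``data``:
    #
    #     ax.contour(data, levels=5,
    #                transform=ax.get_transform(other_wcs))
    #
    # so that all contour segments are transformed onto this axes in one go.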
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop('transform', None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
            # The transform passed to self.contourf will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot :
This method is called from this function with all arguments passed to it.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == 'longitude':
plot_data.append(frame0.spherical.lon.to_value(u.deg))
elif coord.coord_type == 'latitude':
plot_data.append(frame0.spherical.lat.to_value(u.deg))
else:
raise NotImplementedError("Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude.")
if 'transform' in kwargs.keys():
raise TypeError("The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame.")
transform = self.get_transform(native_frame)
kwargs.update({'transform': transform})
args = tuple(plot_data) + args[1:]
return super().plot(*args, **kwargs)
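    # Usage sketch, assuming this axes was created from a celestial WCS:
    #
    #     from astropy.coordinates import SkyCoord
    #     import astropy.units as u
    #     c = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg, frame='icrs')
    #     ax.plot_coord(c, 'o')  # the transform is derived from the frame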
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
# We now force call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, 'coords'):
previous_frame = {'path': self.coords.frame._path,
'color': self.coords.frame.get_color(),
'linewidth': self.coords.frame.get_linewidth()}
else:
previous_frame = {'path': None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(self.wcs, self.frame_class, slices=slices)
self.coords = CoordinatesMap(self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame['path'])
self._transform_pixel2world = transform
if previous_frame['path'] is not None:
self.coords.frame.set_color(previous_frame['color'])
self.coords.frame.set_linewidth(previous_frame['linewidth'])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(coord_meta.get('default_axislabel_position', ['b', 'l'])):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(coord_meta.get('default_ticklabel_position', ['b', 'l'])):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(coord_meta.get('default_ticks_position', ['bltr', 'bltr'])):
self.coords[ind].set_ticks_position(pos)
if rcParams['axes.grid']:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
# Here need to find out range of all coordinates, and update range for
# each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
for coord in coords:
coord._draw_ticks(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord])
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
for coord in coords:
coord._draw_axislabels(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
visible_ticks=visible_ticks)
self.coords.frame.draw(renderer)
def draw(self, renderer, **kwargs):
"""Draw the axes."""
# Before we do any drawing, we need to remove any existing grid lines
# drawn with contours, otherwise if we try and remove the contours
# part way through drawing, we end up with the issue mentioned in
# https://github.com/astropy/astropy/issues/12446
for coords in self._all_coords:
for coord in coords:
coord._clear_grid_contour()
# In Axes.draw, the following code can result in the xlim and ylim
# values changing, so we need to force call this here to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
        # We need to make sure that the frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, **kwargs)
self._drawn = True
# Matplotlib internally sometimes calls set_xlabel(label=...).
def set_xlabel(self, xlabel=None, labelpad=1, loc=None, **kwargs):
"""Set x-label."""
if xlabel is None:
xlabel = kwargs.pop('label', None)
if xlabel is None:
raise TypeError("set_xlabel() missing 1 required positional argument: 'xlabel'")
for coord in self.coords:
if ('b' in coord.axislabels.get_visible_axes() or
'h' in coord.axislabels.get_visible_axes()):
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, loc=None, **kwargs):
"""Set y-label"""
if ylabel is None:
ylabel = kwargs.pop('label', None)
if ylabel is None:
raise TypeError("set_ylabel() missing 1 required positional argument: 'ylabel'")
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if ('l' in coord.axislabels.get_visible_axes() or
'c' in coord.axislabels.get_visible_axes()):
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if ('b' in coord.axislabels.get_visible_axes() or
'h' in coord.axislabels.get_visible_axes()):
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if ('l' in coord.axislabels.get_visible_axes() or
'c' in coord.axislabels.get_visible_axes()):
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(self, transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position('t')
coords[1].set_axislabel_position('r')
coords[0].set_ticklabel_position('t')
coords[1].set_ticklabel_position('r')
self.overlay_coords = coords
return coords
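    # Typical overlay usage:
    #
    #     overlay = ax.get_coords_overlay('galactic')
    #     overlay.grid(color='cyan', linestyle='dotted')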
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
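    # For example, to overlay points given in a different system, or to plot
    # directly in pixel coordinates (``l``, ``b``, ``x_pix`` and ``y_pix``
    # are hypothetical data arrays):
    #
    #     ax.scatter(l, b, transform=ax.get_transform('galactic'))
    #     ax.plot(x_pix, y_pix, transform=ax.get_transform('pixel'))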
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if isinstance(frame, (BaseLowLevelWCS, BaseHighLevelWCS)):
if isinstance(frame, BaseHighLevelWCS):
frame = frame.low_level_wcs
transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (self._transform_pixel2world +
CoordinateTransform(self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in) +
transform_world2pixel)
elif isinstance(frame, str) and frame == 'pixel':
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if isinstance(frame, str) and frame == 'world':
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(self._transform_pixel2world.frame_out, frame)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x, but we only support 3.x now.
if not self.get_visible():
return
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
bb.append(super().get_tightbbox(renderer, *args, **kwargs))
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis='both', *, which='major', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
axis : 'both', 'x', 'y'
Which axis to turn the gridlines on/off for.
which : str
Currently only ``'major'`` is supported.
"""
if not hasattr(self, 'coords'):
return
if which != 'major':
raise NotImplementedError('Plotting the grid for the minor ticks is '
'not supported.')
if axis == 'both':
self.coords.grid(draw_grid=b, **kwargs)
elif axis == 'x':
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == 'y':
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError('axis should be one of x/y/both')
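    # Typical call, using the documented show-rather-than-toggle behaviour
    # to draw a white dotted grid for both world coordinates:
    #
    #     ax.grid(color='white', linestyle='dotted')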
def tick_params(self, axis='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
            ``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, 'coords'):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == 'both':
for pos in ('bottom', 'left', 'top', 'right'):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if 'label' + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ('x', 'y') and self.frame_class is RectangularFrame:
spine = 'b' if axis == 'x' else 'l'
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
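    # Usage sketch: settings can be applied to both coordinates at once, or
    # per coordinate via the recommended CoordinateHelper API:
    #
    #     ax.tick_params(direction='in', length=6, color='white')
    #     ax.coords[0].tick_params(labelsize=8)  # first world coordinate only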
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes.
"""
pass
|
5d0a79e99390c9e4c986aab96b852a9e87add76da34bfa5112b71a548cfbb733 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
from .frame import RectangularFrame
def sort_using(X, Y):
return [x for (y, x) in sorted(zip(Y, X))]
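# For example, sort_using(['a', 'b', 'c'], [3, 1, 2]) returns ['b', 'c', 'a'].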
class TickLabels(Text):
def __init__(self, frame, *args, **kwargs):
self.clear()
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.set_pad(rcParams['xtick.major.pad'])
self._exclude_overlapping = False
# Stale if either xy positions haven't been calculated, or if
# something changes that requires recomputing the positions
self._stale = True
# Check rcParams
if 'color' not in kwargs:
self.set_color(rcParams['xtick.color'])
if 'size' not in kwargs:
self.set_size(rcParams['xtick.labelsize'])
def clear(self):
self.world = defaultdict(list)
self.pixel = defaultdict(list)
self.angle = defaultdict(list)
self.text = defaultdict(list)
self.disp = defaultdict(list)
def add(self, axis, world, pixel, angle, text, axis_displacement):
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
self._stale = True
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
self._stale = True
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
# In the following loop, we need to ignore the last character,
# hence the len(t1) - 1. This is because if we have two strings
# like 13d14m15s we want to make sure that we keep the last
# part (15s) even if the two labels are identical.
for j in range(len(t1) - 1):
if t1[j] != t2[j]:
break
if t1[j] not in '-0123456789.':
start = j + 1
t1 = self.text[axis][i]
if start != 0:
starts_dollar = self.text[axis][i].startswith('$')
self.text[axis][i] = self.text[axis][i][start:]
if starts_dollar:
self.text[axis][i] = '$' + self.text[axis][i]
# Remove any empty LaTeX inline math mode string
if self.text[axis][i] == '$$':
self.text[axis][i] = ''
self._stale = True
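    # Worked example: for consecutive labels '13d14m15s' and '13d14m20s' the
    # shared prefix up to the last non-numeric separator ('13d14m') is
    # dropped, so the second label is shortened to '20s'.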
def set_pad(self, value):
self._pad = value
self._stale = True
def get_pad(self):
return self._pad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
self._stale = True
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def _set_xy_alignments(self, renderer, tick_out_size):
"""
Compute and set the x, y positions and the horizontal/vertical alignment of
each label.
"""
if not self._stale:
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
visible_axes = self.get_visible_axes()
self.xy = {axis: {} for axis in visible_axes}
self.ha = {axis: {} for axis in visible_axes}
self.va = {axis: {} for axis in visible_axes}
for axis in visible_axes:
for i in range(len(self.world[axis])):
# In the event that the label is empty (which is not expected
# but could happen in unforeseen corner cases), we should just
# skip to the next label.
if self.text[axis][i] == '':
continue
x, y = self.pixel[axis][i]
pad = renderer.points_to_pixels(self.get_pad() + tick_out_size)
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.:
ha = 'right'
va = 'bottom'
dx = -pad
dy = -text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.) < 45:
ha = 'center'
va = 'bottom'
dx = 0
dy = -text_size - pad
elif np.abs(self.angle[axis][i] - 180.) < 45:
ha = 'left'
va = 'bottom'
dx = pad
dy = -text_size * 0.5
else:
ha = 'center'
va = 'bottom'
dx = 0
dy = pad
x = x + dx
y = y + dy
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_text(self.text[axis][i])
self.set_position((x, y))
bb = super().get_window_extent(renderer)
# Find width and height, as well as angle at which we
# transition which side of the label we use to anchor the
# label.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * pad
dy += ddy * pad
x = x - dx
y = y - dy
ha = 'center'
va = 'center'
self.xy[axis][i] = (x, y)
self.ha[axis][i] = ha
self.va[axis][i] = va
self._stale = False
def _get_bb(self, axis, i, renderer):
"""
        Get the bounding box of an individual label. N.B. _set_xy_alignments()
        must be called before this method.
"""
if self.text[axis][i] == '':
return
self.set_text(self.text[axis][i])
self.set_position(self.xy[axis][i])
self.set_ha(self.ha[axis][i])
self.set_va(self.va[axis][i])
return super().get_window_extent(renderer)
def draw(self, renderer, bboxes, ticklabels_bbox, tick_out_size):
if not self.get_visible():
return
self._set_xy_alignments(renderer, tick_out_size)
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
# This implicitly sets the label text, position, alignment
bb = self._get_bb(axis, i, renderer)
if bb is None:
continue
# TODO: the problem here is that we might get rid of a label
# that has a key starting bit such as -0:30 where the -0
# might be dropped from all other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super().draw(renderer)
bboxes.append(bb)
ticklabels_bbox[axis].append(bb)
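# When set_exclude_overlapping(True) is active, draw() silently skips any
# label whose bounding box overlaps one already drawn, which is how WCSAxes
# avoids colliding tick labels on crowded axes.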
|
61642f404e9b5f2837f6fe9af6cd1cb3ea23187bb464be5ae2bf4e5ac42de5db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
import numpy as np
from matplotlib.lines import Path, Line2D
from matplotlib.transforms import Affine2D
from matplotlib import rcParams
class Ticks(Line2D):
"""
    Ticks are derived from `~matplotlib.lines.Line2D`, and the ticks
    themselves are drawn as markers, so marker properties such as set_mec
    and set_mew apply. To change the tick size (length), use set_ticksize.
    To change the direction of the ticks (by default they point in the
    opposite direction to the tick labels), use set_tick_out(False).
Note that Matplotlib's defaults dictionary :data:`~matplotlib.rcParams`
contains default settings (color, size, width) of the form `xtick.*` and
`ytick.*`. In a WCS projection, there may not be a clear relationship
between axes of the projection and 'x' or 'y' axes. For this reason,
we read defaults from `xtick.*`. The following settings affect the
default appearance of ticks:
* `xtick.direction`
* `xtick.major.size`
* `xtick.major.width`
* `xtick.minor.size`
* `xtick.color`
Attributes
----------
ticks_locs : dict
This is set when the ticks are drawn, and is a mapping from axis to
the locations of the ticks for that axis.
"""
def __init__(self, ticksize=None, tick_out=None, **kwargs):
if ticksize is None:
ticksize = rcParams['xtick.major.size']
self.set_ticksize(ticksize)
self.set_minor_ticksize(rcParams['xtick.minor.size'])
self.set_tick_out(rcParams['xtick.direction'] == 'out')
self.clear()
line2d_kwargs = {'color': rcParams['xtick.color'],
'linewidth': rcParams['xtick.major.width']}
line2d_kwargs.update(kwargs)
Line2D.__init__(self, [0.], [0.], **line2d_kwargs)
self.set_visible_axes('all')
self._display_minor_ticks = False
def display_minor_ticks(self, display_minor_ticks):
self._display_minor_ticks = display_minor_ticks
def get_display_minor_ticks(self):
return self._display_minor_ticks
def set_tick_out(self, tick_out):
"""
        Set to `True` if the ticks need to be rotated by 180 degrees.
"""
self._tick_out = tick_out
def get_tick_out(self):
"""
        Return `True` if the ticks will be rotated by 180 degrees.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
        Set the length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
        Return the length of the ticks in points.
"""
return self._ticksize
def set_minor_ticksize(self, ticksize):
"""
        Set the length of the minor ticks in points.
"""
self._minor_ticksize = ticksize
def get_minor_ticksize(self):
"""
        Return the length of the minor ticks in points.
"""
return self._minor_ticksize
@property
def out_size(self):
if self._tick_out:
return self._ticksize
else:
return 0.
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def clear(self):
self.world = defaultdict(list)
self.pixel = defaultdict(list)
self.angle = defaultdict(list)
self.disp = defaultdict(list)
self.minor_world = defaultdict(list)
self.minor_pixel = defaultdict(list)
self.minor_angle = defaultdict(list)
self.minor_disp = defaultdict(list)
def add(self, axis, world, pixel, angle, axis_displacement):
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.disp[axis].append(axis_displacement)
def get_minor_world(self):
return self.minor_world
def add_minor(self, minor_axis, minor_world, minor_pixel, minor_angle,
minor_axis_displacement):
self.minor_world[minor_axis].append(minor_world)
self.minor_pixel[minor_axis].append(minor_pixel)
self.minor_angle[minor_axis].append(minor_angle)
self.minor_disp[minor_axis].append(minor_axis_displacement)
def __len__(self):
return len(self.world)
_tickvert_path = Path([[0., 0.], [1., 0.]])
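    # Unit-length horizontal segment used as the tick marker; at draw time
    # it is scaled to the tick size in pixels and rotated to each tick's
    # angle (see _draw_ticks below).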
def draw(self, renderer):
"""
Draw the ticks.
"""
self.ticks_locs = defaultdict(list)
if not self.get_visible():
return
offset = renderer.points_to_pixels(self.get_ticksize())
self._draw_ticks(renderer, self.pixel, self.angle, offset)
if self._display_minor_ticks:
offset = renderer.points_to_pixels(self.get_minor_ticksize())
self._draw_ticks(renderer, self.minor_pixel, self.minor_angle, offset)
def _draw_ticks(self, renderer, pixel_array, angle_array, offset):
"""
        Draw ticks at the given pixel positions and angles (used for both
        major and minor ticks).
"""
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_color())
gc.set_alpha(self.get_alpha())
gc.set_linewidth(self.get_linewidth())
marker_scale = Affine2D().scale(offset, offset)
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
initial_angle = 180. if self.get_tick_out() else 0.
for axis in self.get_visible_axes():
if axis not in pixel_array:
continue
for loc, angle in zip(pixel_array[axis], angle_array[axis]):
# Set the rotation for this tick
marker_rotation.rotate_deg(initial_angle + angle)
# Draw the markers
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
Path(locs), path_trans.get_affine())
# Reset the tick rotation before moving to the next tick
marker_rotation.clear()
self.ticks_locs[axis].append(locs)
gc.restore()
|
ccc25ad7471169340b36eb24c216866a792045aa079c338674dacb1ecd715515 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
from collections import OrderedDict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
__all__ = ['RectangularFrame1D', 'Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame']
class Spine:
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
def __init__(self, parent_axes, transform):
self.parent_axes = parent_axes
self.transform = transform
self._data = None
self._pixel = None
self._world = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
if value is None:
self._pixel = None
self._world = None
else:
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid='ignore'):
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
self._pixel = value
if value is None:
self._data = None
self._world = None
else:
            # Invert the newly set pixel values (not the stale data values).
            self._data = self.parent_axes.transData.inverted().transform(self._pixel)
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def world(self):
return self._world
@world.setter
def world(self, value):
self._world = value
if value is None:
self._data = None
self._pixel = None
else:
self._data = self.transform.transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._update_normal()
def _update_normal(self):
# Find angle normal to border and inwards, in display coordinate
dx = self.pixel[1:, 0] - self.pixel[:-1, 0]
dy = self.pixel[1:, 1] - self.pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
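        # arctan2(dx, -dy) is the angle of the tangent (dx, dy) rotated by
        # +90 degrees; for spines traversed counter-clockwise this normal
        # points into the frame.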
def _halfway_x_y_angle(self):
"""
Return the x, y, normal_angle values halfway along the spine
"""
x_disp, y_disp = self.pixel[:, 0], self.pixel[:, 1]
# Get distance along the path
d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))])
xcen = np.interp(d[-1] / 2., d, x_disp)
ycen = np.interp(d[-1] / 2., d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self.normal_angle[imin] + 180.
return xcen, ycen, normal_angle
class SpineXAligned(Spine):
"""
A single side of an axes, aligned with the X data axis.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
if value is None:
self._pixel = None
self._world = None
else:
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid='ignore'):
                self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
self._pixel = value
if value is None:
self._data = None
self._world = None
else:
            # Invert the newly set pixel values (not the stale data values).
            self._data = self.parent_axes.transData.inverted().transform(self._pixel)
            self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
class BaseFrame(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
spine_class = Spine
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams['axes.linewidth']
self._color = rcParams['axes.edgecolor']
self._path = path
for axis in self.spine_names:
self[axis] = self.spine_class(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return 'lower' if ymin < ymax else 'upper'
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(self._path, transform=self.parent_axes.transData,
facecolor=rcParams['axes.facecolor'], edgecolor='white')
def draw(self, renderer):
for axis in self:
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
p = np.linspace(0., 1., data.shape[0])
p_new = np.linspace(0., 1., n_samples)
spines[axis] = self.spine_class(self.parent_axes, self.transform)
spines[axis].data = np.array([np.interp(p_new, p, d) for d in data.T]).transpose()
return spines
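    # sample() resamples each spine onto ``n_samples`` points by linear
    # interpolation in the normalized path parameter p in [0, 1]; this gives
    # a dense, even sampling along curved spines.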
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : str
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
@abc.abstractmethod
def update_spines(self):
raise NotImplementedError("")
class RectangularFrame1D(BaseFrame):
"""
    A classic rectangular frame, restricted to the bottom and top spines
    for one-dimensional plots.
"""
spine_names = 'bt'
spine_class = SpineXAligned
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
def _update_patch_path(self):
self.update_spines()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000,
transform=self.parent_axes.transData)
line.draw(renderer)
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'brtl'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['r'].data = np.array(([xmax, ymin], [xmax, ymax]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
self['l'].data = np.array(([xmin, ymax], [xmin, ymin]))
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = 'chv'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0., 2 * np.pi, 1000)
self['c'].data = np.array([xmid + dx * np.cos(theta),
ymid + dy * np.sin(theta)]).transpose()
self['h'].data = np.array([np.linspace(xmin, xmax, 1000),
np.repeat(ymid, 1000)]).transpose()
self['v'].data = np.array([np.repeat(xmid, 1000),
np.linspace(ymin, ymax, 1000)]).transpose()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle."""
self.update_spines()
vertices = self['c'].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn."""
axis = 'c'
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
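# Illustrative sketch, not part of the original module: how a frame class
# is typically selected when constructing a WCSAxes instance. The
# ``frame_class`` keyword exists on WCSAxes; the figure and WCS set-up here
# are assumed purely for the example.
def _example_elliptical_frame():
    import matplotlib.pyplot as plt
    from astropy.wcs import WCS
    from astropy.visualization.wcsaxes import WCSAxes
    fig = plt.figure()
    ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(naxis=2),
                 frame_class=EllipticalFrame)
    fig.add_axes(ax)
    return fig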
|
479c27960b9f238421de203c7f6ddeb160d806facbb4f88774ad5bcda243f4c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.io import fits
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
if HAS_MATPLOTLIB:
import matplotlib.image as mpimg
from astropy.visualization.scripts.fits2bitmap import fits2bitmap, main
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestFits2Bitmap:
def setup_class(self):
self.filename = 'test.fits'
self.array = np.arange(16384).reshape((128, 128))
@pytest.mark.openfiles_ignore
def test_function(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
fits.writeto(filename, self.array)
fits2bitmap(filename)
@pytest.mark.openfiles_ignore
def test_script(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
fits.writeto(filename, self.array)
main([filename, '-e', '0'])
@pytest.mark.openfiles_ignore
def test_exten_num(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(self.array)
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, '-e', '1'])
@pytest.mark.openfiles_ignore
def test_exten_name(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
hdu1 = fits.PrimaryHDU()
extname = 'SCI'
hdu2 = fits.ImageHDU(self.array)
hdu2.header['EXTNAME'] = extname
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, '-e', extname])
@pytest.mark.parametrize('file_exten', ['.gz', '.bz2'])
def test_compressed_fits(self, tmpdir, file_exten):
filename = tmpdir.join('test.fits' + file_exten).strpath
fits.writeto(filename, self.array)
main([filename, '-e', '0'])
@pytest.mark.openfiles_ignore
def test_orientation(self, tmpdir):
"""
Regression test to check the image vertical orientation/origin.
"""
filename = tmpdir.join(self.filename).strpath
out_filename = 'fits2bitmap_test.png'
out_filename = tmpdir.join(out_filename).strpath
data = np.zeros((32, 32))
data[0:16, :] = 1.
fits.writeto(filename, data)
main([filename, '-e', '0', '-o', out_filename])
img = mpimg.imread(out_filename)
assert img[0, 0, 0] == 0
assert img[31, 31, 0] == 1
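# Illustrative note, not part of the original tests: ``main`` takes an
# argv-style list, so the calls above are assumed to mirror the installed
# console script, e.g. ``fits2bitmap test.fits -e 0 -o out.png``.
def _example_cli_equivalent(filename, out_filename):
    main([filename, '-e', '0', '-o', out_filename])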
|
3cfec60793d0353f64381d4f183e4ea9dc88b9c53eb7c72530d5cfa7c3ed57d3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import textwrap
import numpy as np
import pytest
from astropy.io import fits
from astropy.nddata.nduncertainty import (
StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty,
InverseVariance)
from astropy import units as u
from astropy import log
from astropy.wcs import WCS, FITSFixedWarning
from astropy.utils import NumpyRNGContext
from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames,
get_pkg_data_contents)
from astropy.utils.exceptions import AstropyWarning
from astropy.nddata.ccddata import CCDData
from astropy.nddata import _testing as nd_testing
from astropy.table import Table
DEFAULT_DATA_SIZE = 100
with NumpyRNGContext(123):
_random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE])
def create_ccd_data():
"""
Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE
with units of ADU.
"""
data = _random_array.copy()
fake_meta = {'my_key': 42, 'your_key': 'not 42'}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd
def test_ccddata_empty():
with pytest.raises(TypeError):
CCDData() # empty initializer should fail
def test_ccddata_must_have_unit():
with pytest.raises(ValueError):
CCDData(np.zeros([2, 2]))
def test_ccddata_unit_cannot_be_set_to_none():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.unit = None
def test_ccddata_meta_header_conflict():
with pytest.raises(ValueError) as exc:
CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2})
assert "can't have both header and meta." in str(exc.value)
def test_ccddata_simple():
ccd_data = create_ccd_data()
assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert ccd_data.dtype == np.dtype(float)
def test_ccddata_init_with_string_electron_unit():
ccd = CCDData(np.zeros([2, 2]), unit="electron")
assert ccd.unit is u.electron
def test_initialize_from_FITS(tmpdir):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdulist = fits.HDUList([hdu])
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
cd = CCDData.read(filename, unit=u.electron)
assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert np.issubdtype(cd.data.dtype, np.floating)
for k, v in hdu.header.items():
assert cd.meta[k] == v
def test_initialize_from_fits_with_unit_in_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = u.adu.to_string()
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
# An explicit unit in the read overrides any unit in the FITS file
ccd2 = CCDData.read(filename, unit="photon")
assert ccd2.unit is u.photon
def test_initialize_from_fits_with_ADU_in_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = 'ADU'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
    hdu.header['bunit'] = 'definitely-not-a-unit'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
with pytest.raises(ValueError):
CCDData.read(filename)
def test_initialize_from_fits_with_technically_invalid_but_not_really(tmpdir):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header['bunit'] = 'ELECTRONS/S'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
assert ccd.unit == u.electron/u.s
def test_initialize_from_fits_with_data_in_different_extension(tmpdir):
fake_img = np.arange(4).reshape(2, 2)
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(fake_img)
hdus = fits.HDUList([hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
ccd = CCDData.read(filename, unit='adu')
    # ccd should pick up the data from the image extension...did it?
np.testing.assert_array_equal(ccd.data, fake_img)
# check that the header is the combined header
assert hdu2.header + hdu1.header == ccd.header
def test_initialize_from_fits_with_extension(tmpdir):
fake_img1 = np.zeros([2, 2])
fake_img2 = np.arange(4).reshape(2, 2)
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(fake_img1, name='first', ver=1)
hdu2 = fits.ImageHDU(fake_img2, name='second', ver=1)
hdus = fits.HDUList([hdu0, hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
ccd = CCDData.read(filename, hdu=2, unit='adu')
    # ccd should pick up the data from the second extension...did it?
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu string parameter
ccd = CCDData.read(filename, hdu='second', unit='adu')
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu tuple parameter
ccd = CCDData.read(filename, hdu=('second', 1), unit='adu')
np.testing.assert_array_equal(ccd.data, fake_img2)
def test_write_unit_to_hdu():
ccd_data = create_ccd_data()
ccd_unit = ccd_data.unit
hdulist = ccd_data.to_hdu()
assert 'bunit' in hdulist[0].header
assert hdulist[0].header['bunit'] == ccd_unit.to_string()
def test_initialize_from_FITS_bad_keyword_raises_error(tmpdir):
# There are two fits.open keywords that are not permitted in ccdproc:
# do_not_scale_image_data and scale_back
ccd_data = create_ccd_data()
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit,
do_not_scale_image_data=True)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, scale_back=True)
def test_ccddata_writer(tmpdir):
ccd_data = create_ccd_data()
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
def test_ccddata_writer_as_imagehdu(tmpdir):
ccd_data = create_ccd_data()
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename, as_image_hdu=False)
with fits.open(filename) as hdus:
assert len(hdus) == 1
filename = tmpdir.join('test2.fits').strpath
ccd_data.write(filename, as_image_hdu=True)
with fits.open(filename) as hdus:
assert len(hdus) == 2
assert isinstance(hdus[1], fits.ImageHDU)
def test_ccddata_meta_is_case_sensitive():
ccd_data = create_ccd_data()
key = 'SoMeKEY'
ccd_data.meta[key] = 10
assert key.lower() not in ccd_data.meta
assert key.upper() not in ccd_data.meta
assert key in ccd_data.meta
def test_ccddata_meta_is_not_fits_header():
ccd_data = create_ccd_data()
ccd_data.meta = {'OBSERVER': 'Edwin Hubble'}
assert not isinstance(ccd_data.meta, fits.Header)
def test_fromMEF(tmpdir):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
hdulist = fits.HDUList(hdu)
hdulist.append(hdu2)
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
    # by default, we read from the first extension
cd = CCDData.read(filename, unit=u.electron)
np.testing.assert_array_equal(cd.data, ccd_data.data)
# but reading from the second should work too
cd = CCDData.read(filename, hdu=1, unit=u.electron)
np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)
def test_metafromheader():
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromdict():
dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600}
d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
def test_header2meta():
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), unit=u.electron)
d1.header = hdr
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromstring_fail():
hdr = 'this is not a valid header'
with pytest.raises(TypeError):
CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)
def test_setting_bad_uncertainty_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
# Uncertainty is supposed to be an instance of NDUncertainty
ccd_data.uncertainty = 10
def test_setting_uncertainty_with_array():
ccd_data = create_ccd_data()
ccd_data.uncertainty = None
fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
ccd_data.uncertainty = fake_uncertainty.copy()
np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)
def test_setting_uncertainty_wrong_shape_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(ValueError):
ccd_data.uncertainty = np.zeros([3, 4])
def test_to_hdu():
ccd_data = create_ccd_data()
ccd_data.meta = {'observer': 'Edwin Hubble'}
fits_hdulist = ccd_data.to_hdu()
assert isinstance(fits_hdulist, fits.HDUList)
for k, v in ccd_data.meta.items():
assert fits_hdulist[0].header[k] == v
np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)
def test_to_hdu_as_imagehdu():
ccd_data = create_ccd_data()
fits_hdulist = ccd_data.to_hdu(as_image_hdu=False)
assert isinstance(fits_hdulist[0], fits.PrimaryHDU)
fits_hdulist = ccd_data.to_hdu(as_image_hdu=True)
assert isinstance(fits_hdulist[0], fits.ImageHDU)
def test_copy():
ccd_data = create_ccd_data()
ccd_copy = ccd_data.copy()
np.testing.assert_array_equal(ccd_copy.data, ccd_data.data)
assert ccd_copy.unit == ccd_data.unit
assert ccd_copy.meta == ccd_data.meta
@pytest.mark.parametrize('operation,affects_uncertainty', [
("multiply", True),
("divide", True),
])
@pytest.mark.parametrize('operand', [
2.0,
2 * u.dimensionless_unscaled,
2 * u.photon / u.adu,
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
def test_mult_div_overload(operand, with_uncertainty,
operation, affects_uncertainty):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
# Need the "1 *" below to force arguments to be Quantity to work around
# astropy/astropy#2377
expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
assert result.unit == expected_unit
else:
assert result.unit == ccd_data.unit
@pytest.mark.parametrize('operation,affects_uncertainty', [
("add", False),
("subtract", False),
])
@pytest.mark.parametrize('operand,expect_failure', [
(2.0, u.UnitsError), # fail--units don't match image
(2 * u.dimensionless_unscaled, u.UnitsError), # same
(2 * u.adu, False),
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
def test_add_sub_overload(operand, expect_failure, with_uncertainty,
operation, affects_uncertainty):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
if expect_failure:
with pytest.raises(expect_failure):
result = method(operand)
return
else:
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
assert (result.unit == ccd_data.unit and result.unit == operand.unit)
else:
assert result.unit == ccd_data.unit
def test_arithmetic_overload_fails():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.multiply("five")
with pytest.raises(TypeError):
ccd_data.divide("five")
with pytest.raises(TypeError):
ccd_data.add("five")
with pytest.raises(TypeError):
ccd_data.subtract("five")
def test_arithmetic_no_wcs_compare():
ccd = CCDData(np.ones((10, 10)), unit='')
assert ccd.add(ccd, compare_wcs=None).wcs is None
assert ccd.subtract(ccd, compare_wcs=None).wcs is None
assert ccd.multiply(ccd, compare_wcs=None).wcs is None
assert ccd.divide(ccd, compare_wcs=None).wcs is None
def test_arithmetic_with_wcs_compare():
def return_true(_, __):
return True
wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2)
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=wcs1)
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=wcs2)
nd_testing.assert_wcs_seem_equal(
ccd1.add(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.subtract(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.multiply(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.divide(ccd2, compare_wcs=return_true).wcs,
wcs1)
def test_arithmetic_with_wcs_compare_fail():
def return_false(_, __):
return False
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=WCS())
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=WCS())
with pytest.raises(ValueError):
ccd1.add(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.subtract(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.multiply(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.divide(ccd2, compare_wcs=return_false)
def test_arithmetic_overload_ccddata_operand():
ccd_data = create_ccd_data()
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
operand = ccd_data.copy()
result = ccd_data.add(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
2 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.subtract(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
0 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.multiply(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
ccd_data.data ** 2)
expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
result = ccd_data.divide(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
np.ones_like(ccd_data.data))
expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
def test_arithmetic_overload_differing_units():
a = np.array([1, 2, 3]) * u.m
b = np.array([1, 2, 3]) * u.cm
ccddata = CCDData(a)
# TODO: Could also be parametrized.
res = ccddata.add(b)
np.testing.assert_array_almost_equal(res.data, np.add(a, b).value)
assert res.unit == np.add(a, b).unit
res = ccddata.subtract(b)
np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value)
assert res.unit == np.subtract(a, b).unit
res = ccddata.multiply(b)
np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value)
assert res.unit == np.multiply(a, b).unit
res = ccddata.divide(b)
np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value)
assert res.unit == np.divide(a, b).unit
def test_arithmetic_add_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.add(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.add(np.arange(3))
def test_arithmetic_subtract_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.subtract(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.subtract(np.arange(3))
def test_arithmetic_multiply_with_array():
ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m)
res = ccd.multiply(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3)
assert res.unit == ccd.unit
def test_arithmetic_divide_with_array():
ccd = CCDData(np.ones((3, 3)), unit=u.m)
res = ccd.divide(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3)
assert res.unit == ccd.unit
def test_history_preserved_if_metadata_is_fits_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['history'] = 'one'
hdu.header['history'] = 'two'
hdu.header['history'] = 'three'
assert len(hdu.header['history']) == 3
tmp_file = tmpdir.join('temp.fits').strpath
hdu.writeto(tmp_file)
ccd_read = CCDData.read(tmp_file, unit="adu")
assert ccd_read.header['history'] == hdu.header['history']
def test_info_logged_if_unit_in_fits_header(tmpdir):
ccd_data = create_ccd_data()
tmpfile = tmpdir.join('temp.fits')
ccd_data.write(tmpfile.strpath)
log.setLevel('INFO')
explicit_unit_name = "photon"
with log.log_to_list() as log_list:
_ = CCDData.read(tmpfile.strpath, unit=explicit_unit_name)
assert explicit_unit_name in log_list[0].message
def test_wcs_attribute(tmpdir):
"""
    Check that the WCS attribute gets added to the header, and that if a
    CCDData object is created from a FITS file with a header and its WCS
    attribute is then modified, turning the CCDData object back into an HDU
    overwrites the old WCS information in the header.
"""
ccd_data = create_ccd_data()
tmpfile = tmpdir.join('temp.fits')
# This wcs example is taken from the astropy.wcs docs.
wcs = WCS(naxis=2)
wcs.wcs.crpix = np.array(ccd_data.shape) / 2
wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
wcs.wcs.crval = [0, -90]
wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
wcs.wcs.set_pv([(2, 1, 45.0)])
ccd_data.header = ccd_data.to_hdu()[0].header
ccd_data.header.extend(wcs.to_header(), useblanks=False)
ccd_data.write(tmpfile.strpath)
# Get the header length after it has been extended by the WCS keywords
original_header_length = len(ccd_data.header)
ccd_new = CCDData.read(tmpfile.strpath)
# WCS attribute should be set for ccd_new
assert ccd_new.wcs is not None
# WCS attribute should be equal to wcs above.
assert ccd_new.wcs.wcs == wcs.wcs
# Converting CCDData object with wcs to an hdu shouldn't
# create duplicate wcs-related entries in the header.
ccd_new_hdu = ccd_new.to_hdu()[0]
assert len(ccd_new_hdu.header) == original_header_length
# Making a CCDData with WCS (but not WCS in the header) should lead to
# WCS information in the header when it is converted to an HDU.
ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
hdu = ccd_wcs_not_in_header.to_hdu()[0]
wcs_header = wcs.to_header()
for k in wcs_header.keys():
# Skip these keywords if they are in the WCS header because they are
# not WCS-specific.
if k in ['', 'COMMENT', 'HISTORY']:
continue
# No keyword from the WCS should be in the header.
assert k not in ccd_wcs_not_in_header.header
# Every keyword in the WCS should be in the header of the HDU
assert hdu.header[k] == wcs_header[k]
    # Now check that if the WCS of a CCDData is modified and the CCDData is
    # then converted to an HDU, the WCS keywords in the header are
    # overwritten with the appropriate keywords from the modified WCS.
#
# ccd_new has a WCS and WCS keywords in the header, so try modifying
# the WCS.
ccd_new.wcs.wcs.cdelt *= 2
ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0]
assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1]
def test_wcs_keywords_removed_from_header():
"""
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from header.
"""
from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
data_file = get_pkg_data_filename('data/sip-wcs.fits')
ccd = CCDData.read(data_file)
with pytest.warns(AstropyWarning,
match=r'Some non-standard WCS keywords were excluded'):
wcs_header = ccd.wcs.to_header()
assert not (set(wcs_header) & set(ccd.meta) - keepers)
# Make sure that exceptions are not raised when trying to remove missing
# keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits')
with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"):
ccd = CCDData.read(data_file1, unit='count')
def test_wcs_SIP_coefficient_keywords_removed():
# If SIP polynomials are present, check that no more polynomial
# coefficients remain in the header. See #8598
# The SIP paper is ambiguous as to whether keywords like
# A_0_0 can appear in the header for a 2nd order or higher
# polynomial. The paper clearly says that the corrections
# are only for quadratic or higher order, so A_0_0 and the like
# should be zero if they are present, but they apparently can be
# there (or at least astrometry.net produces them).
# astropy WCS does not write those coefficients, so they were
# not being removed from the header even though they are WCS-related.
data_file = get_pkg_data_filename('data/sip-wcs.fits')
test_keys = ['A_0_0', 'B_0_1']
# Make sure the keywords added to this file for testing are there
with fits.open(data_file) as hdu:
for key in test_keys:
assert key in hdu[0].header
ccd = CCDData.read(data_file)
# Now the test...the two keywords above should have been removed.
for key in test_keys:
assert key not in ccd.header
@pytest.mark.filterwarnings('ignore')
def test_wcs_keyword_removal_for_wcs_test_files():
"""
Test, for the WCS test files, that keyword removal works as
expected. Those cover a much broader range of WCS types than
test_wcs_keywords_removed_from_header.
Includes regression test for #8597
"""
from astropy.nddata.ccddata import _generate_wcs_and_update_header
from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER,
_CDs, _PCs)
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
wcs_headers = get_pkg_data_filenames('../../wcs/tests/data',
pattern='*.hdr')
for hdr in wcs_headers:
# Skip the files that are expected to be bad...
if ('invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr or
'chandra-pixlist-wcs' in hdr):
continue
header_string = get_pkg_data_contents(hdr)
header = fits.Header.fromstring(header_string)
wcs = WCS(header_string)
header_from_wcs = wcs.to_header(relax=True)
new_header, new_wcs = _generate_wcs_and_update_header(header)
new_wcs_header = new_wcs.to_header(relax=True)
# Make sure all of the WCS-related keywords generated by astropy
# have been removed.
assert not (set(new_header) &
set(new_wcs_header) -
keepers)
# Check that new_header contains no remaining WCS information.
# Specifically, check that
# 1. The combination of new_header and new_wcs does not contain
# both PCi_j and CDi_j keywords. See #8597.
# Check for 1
final_header = new_header + new_wcs_header
final_header_set = set(final_header)
if _PCs & final_header_set:
assert not (_CDs & final_header_set)
elif _CDs & final_header_set:
assert not (_PCs & final_header_set)
# Check that the new wcs is the same as the old.
for k, v in new_wcs_header.items():
if isinstance(v, str):
assert header_from_wcs[k] == v
else:
np.testing.assert_almost_equal(header_from_wcs[k], v)
def test_read_wcs_not_creatable(tmpdir):
# The following Header can't be converted to a WCS object. See also #6499.
hdr_txt_example_WCS = textwrap.dedent('''
SIMPLE = T / Fits standard
BITPIX = 16 / Bits per pixel
NAXIS = 2 / Number of axes
NAXIS1 = 1104 / Axis length
NAXIS2 = 4241 / Axis length
CRVAL1 = 164.98110962 / Physical value of the reference pixel X
CRVAL2 = 44.34089279 / Physical value of the reference pixel Y
CRPIX1 = -34.0 / Reference pixel in X (pixel)
CRPIX2 = 2041.0 / Reference pixel in Y (pixel)
CDELT1 = 0.10380000 / X Scale projected on detector (#/pix)
CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix)
CTYPE1 = 'RA---TAN' / Pixel coordinate system
CTYPE2 = 'WAVELENGTH' / Pixel coordinate system
CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1
CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2
CD1_1 = 0.20760000 / Pixel Coordinate translation matrix
CD1_2 = 0.00000000 / Pixel Coordinate translation matrix
CD2_1 = 0.00000000 / Pixel Coordinate translation matrix
CD2_2 = 0.10380000 / Pixel Coordinate translation matrix
C2YPE1 = 'RA---TAN' / Pixel coordinate system
C2YPE2 = 'DEC--TAN' / Pixel coordinate system
C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1
C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2
RADECSYS= 'FK5 ' / The equatorial coordinate system
''')
hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n')
hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)])
filename = tmpdir.join('afile.fits').strpath
hdul.writeto(filename)
# The hdr cannot be converted to a WCS object because of an
# InconsistentAxisTypesError but it should still open the file
ccd = CCDData.read(filename, unit='adu')
assert ccd.wcs is None
def test_header():
ccd_data = create_ccd_data()
a = {'Observer': 'Hubble'}
ccd = CCDData(ccd_data, header=a)
assert ccd.meta == a
def test_wcs_arithmetic():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
result = ccd_data.multiply(1.0)
nd_testing.assert_wcs_seem_equal(result.wcs, wcs)
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_wcs_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.wcs = WCS(naxis=2)
method = getattr(ccd_data, operation)
result = method(ccd_data2)
nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs)
assert ccd_data2.wcs is None
def test_wcs_sip_handling():
"""
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged.
"""
data_file = get_pkg_data_filename('data/sip-wcs.fits')
def check_wcs_ctypes(header):
expected_wcs_ctypes = {
'CTYPE1': 'RA---TAN-SIP',
'CTYPE2': 'DEC--TAN-SIP'
}
return [header[k] == v for k, v in expected_wcs_ctypes.items()]
ccd_original = CCDData.read(data_file)
# After initialization the keywords should be in the WCS, not in the
# meta.
with fits.open(data_file) as raw:
good_ctype = check_wcs_ctypes(raw[0].header)
assert all(good_ctype)
ccd_new = ccd_original.to_hdu()
good_ctype = check_wcs_ctypes(ccd_new[0].header)
assert all(good_ctype)
# Try converting to header with wcs_relax=False and
# the header should contain the CTYPE keywords without
# the -SIP
ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
assert not any(good_ctype)
assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN'
assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN'
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_mask_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.mask = (ccd_data.data > 0)
method = getattr(ccd_data, operation)
result = method(ccd_data2)
np.testing.assert_equal(result.mask, ccd_data.mask)
def test_write_read_multiextensionfits_mask_default(tmpdir):
# Test that if a mask is present the mask is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
@pytest.mark.parametrize(
'uncertainty_type',
[StdDevUncertainty, VarianceUncertainty, InverseVariance])
def test_write_read_multiextensionfits_uncertainty_default(
tmpdir, uncertainty_type):
    # Test that if an uncertainty is present it is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
@pytest.mark.parametrize(
'uncertainty_type',
[StdDevUncertainty, VarianceUncertainty, InverseVariance])
def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key(
tmpdir, uncertainty_type):
    # Test that if an uncertainty is present it is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, key_uncertainty_type='Blah')
ccd_after = CCDData.read(filename, key_uncertainty_type='Blah')
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_write_read_multiextensionfits_not(tmpdir):
# Test that writing mask and uncertainty can be disabled
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
def test_write_read_multiextensionfits_custom_ext_names(tmpdir):
# Test writing mask, uncertainty in another extension than default
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
# Try reading with defaults extension names
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
# Try reading with custom extension names
ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
assert ccd_after.uncertainty is not None
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_read_old_style_multiextensionfits(tmpdir):
# Regression test for https://github.com/astropy/ccdproc/issues/664
#
# Prior to astropy 3.1 there was no uncertainty type saved
# in the multiextension fits files generated by CCDData
    # because the uncertainty had to be StdDevUncertainty.
#
# Current version should be able to read those in.
#
size = 4
# Value of the variables below are not important to the test.
data = np.zeros([size, size])
mask = data > 0.9
uncert = np.sqrt(data)
ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu')
# We'll create the file manually to ensure we have the
# right extension names and no uncertainty type.
hdulist = ccd.to_hdu()
del hdulist[2].header['UTYPE']
file_name = tmpdir.join('old_ccddata_mef.fits').strpath
hdulist.writeto(file_name)
ccd = CCDData.read(file_name)
assert isinstance(ccd.uncertainty, StdDevUncertainty)
def test_wcs():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
assert ccd_data.wcs is wcs
def test_recognized_fits_formats_for_read_write(tmpdir):
# These are the extensions that are supposed to be supported.
ccd_data = create_ccd_data()
supported_extensions = ['fit', 'fits', 'fts']
for ext in supported_extensions:
path = tmpdir.join(f"test.{ext}")
ccd_data.write(path.strpath)
from_disk = CCDData.read(path.strpath)
assert (ccd_data.data == from_disk.data).all()
def test_stddevuncertainty_compat_descriptor_no_parent():
with pytest.raises(MissingDataAssociationException):
StdDevUncertainty(np.ones((10, 10))).parent_nddata
def test_stddevuncertainty_compat_descriptor_no_weakref():
# TODO: Remove this test if astropy 1.0 isn't supported anymore
# This test might create a Memoryleak on purpose, so the last lines after
# the assert are IMPORTANT cleanup.
ccd = CCDData(np.ones((10, 10)), unit='')
uncert = StdDevUncertainty(np.ones((10, 10)))
uncert._parent_nddata = ccd
assert uncert.parent_nddata is ccd
uncert._parent_nddata = None
# https://github.com/astropy/astropy/issues/7595
def test_read_returns_image(tmpdir):
    # Test if CCDData.read returns an image when reading a fits file
    # containing a table and an image, in that order.
tbl = Table(np.ones(10).reshape(5, 2))
img = np.ones((5, 5))
hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()),
fits.ImageHDU(img)])
filename = tmpdir.join('table_image.fits').strpath
hdul.writeto(filename)
ccd = CCDData.read(filename, unit='adu')
# Expecting to get (5, 5), the size of the image
assert ccd.data.shape == (5, 5)
# https://github.com/astropy/astropy/issues/9664
def test_sliced_ccdata_to_hdu():
wcs = WCS(naxis=2)
wcs.wcs.crpix = 10, 10
ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel')
trimmed = ccd[2:-2, 2:-2]
hdul = trimmed.to_hdu()
assert isinstance(hdul, fits.HDUList)
assert hdul[0].header['CRPIX1'] == 8
assert hdul[0].header['CRPIX2'] == 8
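# Illustrative sketch, not part of the original tests: the multiextension
# round-trip exercised above, condensed into one helper. The file name is
# arbitrary.
def _example_ccddata_roundtrip(tmpdir):
    ccd = CCDData(np.ones((4, 4)), unit='adu',
                  mask=np.zeros((4, 4), dtype=bool),
                  uncertainty=StdDevUncertainty(np.ones((4, 4))))
    filename = tmpdir.join('roundtrip.fits').strpath
    ccd.write(filename)
    ccd_back = CCDData.read(filename)
    assert isinstance(ccd_back.uncertainty, StdDevUncertainty)
    np.testing.assert_array_equal(ccd.mask, ccd_back.mask)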
|
08a96a6c0482e040780ea4778e82acbd5f76d8d95e05a07dd0b7eb282eabcecf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from astropy.nddata.nduncertainty import (StdDevUncertainty,
VarianceUncertainty,
InverseVariance,
NDUncertainty,
IncompatibleUncertaintiesException,
MissingDataAssociationException,
UnknownUncertainty)
from astropy.nddata.nddata import NDData
from astropy.nddata.compat import NDDataArray
from astropy.nddata.ccddata import CCDData
from astropy import units as u
# Regarding setter tests:
# No need to test setters since the uncertainty is considered immutable after
# creation except of the parent_nddata attribute and this accepts just
# everything.
# Additionally they should be covered by NDData, NDArithmeticMixin which rely
# on it
# Regarding propagate, _convert_uncert, _propagate_* tests:
# They should be covered by NDArithmeticMixin since there is generally no need
# to test them without this mixin.
# Regarding __getitem__ tests:
# Should be covered by NDSlicingMixin.
# Regarding StdDevUncertainty tests:
# This subclass only overrides the methods for propagation, so those
# should likewise be covered by NDArithmeticMixin.
# Not really fake but the minimum an uncertainty has to override not to be
# abstract.
class FakeUncertainty(NDUncertainty):
@property
def uncertainty_type(self):
return 'fake'
def _data_unit_to_uncertainty_unit(self, value):
return None
def _propagate_add(self, data, final_data):
pass
def _propagate_subtract(self, data, final_data):
pass
def _propagate_multiply(self, data, final_data):
pass
def _propagate_divide(self, data, final_data):
pass
# Test the fake (added also StdDevUncertainty which should behave identical)
# the list of classes used for parametrization in tests below
uncertainty_types_to_be_tested = [
FakeUncertainty,
StdDevUncertainty,
VarianceUncertainty,
InverseVariance,
UnknownUncertainty
]
uncertainty_types_with_conversion_support = (
StdDevUncertainty, VarianceUncertainty, InverseVariance)
uncertainty_types_without_conversion_support = (
FakeUncertainty, UnknownUncertainty)
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_list(UncertClass):
fake_uncert = UncertClass([1, 2, 3])
assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
# Copy makes no difference since casting a list to an np.ndarray always
# makes a copy.
# But let's give the uncertainty a unit too
fake_uncert = UncertClass([1, 2, 3], unit=u.adu)
assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
assert fake_uncert.unit is u.adu
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_ndarray(UncertClass):
uncert = np.arange(100).reshape(10, 10)
fake_uncert = UncertClass(uncert)
# Numpy Arrays are copied by default
assert_array_equal(fake_uncert.array, uncert)
assert fake_uncert.array is not uncert
# Now try it without copy
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array is uncert
# let's provide a unit
fake_uncert = UncertClass(uncert, unit=u.adu)
assert_array_equal(fake_uncert.array, uncert)
assert fake_uncert.array is not uncert
assert fake_uncert.unit is u.adu
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_quantity(UncertClass):
uncert = np.arange(10).reshape(2, 5) * u.adu
fake_uncert = UncertClass(uncert)
# Numpy Arrays are copied by default
assert_array_equal(fake_uncert.array, uncert.value)
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.adu
    # Try without copy (the identity check below cannot pass, since
    # quantity.value returns a new view object on each access)
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.adu
# Now try with an explicit unit parameter too
fake_uncert = UncertClass(uncert, unit=u.m)
assert_array_equal(fake_uncert.array, uncert.value) # No conversion done
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.m # It took the explicit one
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_fake(UncertClass):
uncert = np.arange(5).reshape(5, 1)
fake_uncert1 = UncertClass(uncert)
fake_uncert2 = UncertClass(fake_uncert1)
assert_array_equal(fake_uncert2.array, uncert)
assert fake_uncert2.array is not uncert
# Without making copies
fake_uncert1 = UncertClass(uncert, copy=False)
fake_uncert2 = UncertClass(fake_uncert1, copy=False)
assert_array_equal(fake_uncert2.array, fake_uncert1.array)
assert fake_uncert2.array is fake_uncert1.array
# With a unit
uncert = np.arange(5).reshape(5, 1) * u.adu
fake_uncert1 = UncertClass(uncert)
fake_uncert2 = UncertClass(fake_uncert1)
assert_array_equal(fake_uncert2.array, uncert.value)
assert fake_uncert2.array is not uncert.value
assert fake_uncert2.unit is u.adu
# With a unit and an explicit unit-parameter
fake_uncert2 = UncertClass(fake_uncert1, unit=u.cm)
assert_array_equal(fake_uncert2.array, uncert.value)
assert fake_uncert2.array is not uncert.value
assert fake_uncert2.unit is u.cm
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_somethingElse(UncertClass):
# What about a dict?
uncert = {'rdnoise': 2.9, 'gain': 0.6}
fake_uncert = UncertClass(uncert)
assert fake_uncert.array == uncert
# We can pass a unit too but since we cannot do uncertainty propagation
# the interpretation is up to the user
fake_uncert = UncertClass(uncert, unit=u.s)
assert fake_uncert.array == uncert
assert fake_uncert.unit is u.s
# So, now check what happens if copy is False
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array == uncert
assert id(fake_uncert) != id(uncert)
# dicts cannot be referenced without copy
# TODO : Find something that can be referenced without copy :-)
def test_init_fake_with_StdDevUncertainty():
# Different instances of uncertainties are not directly convertible so this
# should fail
uncert = np.arange(5).reshape(5, 1)
std_uncert = StdDevUncertainty(uncert)
with pytest.raises(IncompatibleUncertaintiesException):
FakeUncertainty(std_uncert)
# Ok try it the other way around
fake_uncert = FakeUncertainty(uncert)
with pytest.raises(IncompatibleUncertaintiesException):
StdDevUncertainty(fake_uncert)
def test_uncertainty_type():
fake_uncert = FakeUncertainty([10, 2])
assert fake_uncert.uncertainty_type == 'fake'
std_uncert = StdDevUncertainty([10, 2])
assert std_uncert.uncertainty_type == 'std'
var_uncert = VarianceUncertainty([10, 2])
assert var_uncert.uncertainty_type == 'var'
ivar_uncert = InverseVariance([10, 2])
assert ivar_uncert.uncertainty_type == 'ivar'
def test_uncertainty_correlated():
fake_uncert = FakeUncertainty([10, 2])
assert not fake_uncert.supports_correlated
std_uncert = StdDevUncertainty([10, 2])
assert std_uncert.supports_correlated
def test_for_leak_with_uncertainty():
# Regression test for memory leak because of cyclic references between
# NDData and uncertainty
from collections import defaultdict
from gc import get_objects
def test_leak(func, specific_objects=None):
"""Function based on gc.get_objects to determine if any object or
a specific object leaks.
It requires a function to be given and if any objects survive the
function scope it's considered a leak (so don't return anything).
"""
before = defaultdict(int)
for i in get_objects():
before[type(i)] += 1
func()
after = defaultdict(int)
for i in get_objects():
after[type(i)] += 1
if specific_objects is None:
assert all(after[k] - before[k] == 0 for k in after)
else:
assert after[specific_objects] - before[specific_objects] == 0
def non_leaker_nddata():
# Without uncertainty there is no reason to assume that there is a
# memory leak but test it nevertheless.
NDData(np.ones(100))
def leaker_nddata():
# With uncertainty there was a memory leak!
NDData(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))
test_leak(non_leaker_nddata, NDData)
test_leak(leaker_nddata, NDData)
# Same for NDDataArray:
from astropy.nddata.compat import NDDataArray
def non_leaker_nddataarray():
NDDataArray(np.ones(100))
def leaker_nddataarray():
NDDataArray(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))
test_leak(non_leaker_nddataarray, NDDataArray)
test_leak(leaker_nddataarray, NDDataArray)
def test_for_stolen_uncertainty():
# Sharing uncertainties should not overwrite the parent_nddata attribute
ndd1 = NDData(1, uncertainty=1)
ndd2 = NDData(2, uncertainty=ndd1.uncertainty)
# uncertainty.parent_nddata.data should be the original data!
assert ndd1.uncertainty.parent_nddata.data == ndd1.data
assert ndd2.uncertainty.parent_nddata.data == ndd2.data
def test_stddevuncertainty_pickle():
uncertainty = StdDevUncertainty(np.ones(3), unit=u.m)
uncertainty_restored = pickle.loads(pickle.dumps(uncertainty))
np.testing.assert_array_equal(uncertainty.array, uncertainty_restored.array)
assert uncertainty.unit == uncertainty_restored.unit
with pytest.raises(MissingDataAssociationException):
uncertainty_restored.parent_nddata
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_quantity(UncertClass):
fake_uncert = UncertClass([1, 2, 3], unit=u.adu)
assert isinstance(fake_uncert.quantity, u.Quantity)
assert fake_uncert.quantity.unit.is_equivalent(u.adu)
fake_uncert_nounit = UncertClass([1, 2, 3])
assert isinstance(fake_uncert_nounit.quantity, u.Quantity)
assert fake_uncert_nounit.quantity.unit.is_equivalent(u.dimensionless_unscaled)
@pytest.mark.parametrize(('UncertClass'),
[VarianceUncertainty,
StdDevUncertainty,
InverseVariance])
def test_setting_uncertainty_unit_results_in_unit_object(UncertClass):
v = UncertClass([1, 1])
v.unit = 'electron'
assert isinstance(v.unit, u.UnitBase)
@pytest.mark.parametrize('NDClass', [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(('UncertClass'),
[VarianceUncertainty,
StdDevUncertainty,
InverseVariance])
def test_changing_unit_to_value_inconsistent_with_parent_fails(NDClass,
UncertClass):
ndd1 = NDClass(1, unit='adu')
v = UncertClass(1)
# Sets the uncertainty unit to whatever makes sense with this data.
ndd1.uncertainty = v
with pytest.raises(u.UnitConversionError):
# Nothing special about 15 except no one would ever use that unit
v.unit = ndd1.unit ** 15
@pytest.mark.parametrize('NDClass', [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(('UncertClass, expected_unit'),
[(VarianceUncertainty, u.adu ** 2),
(StdDevUncertainty, u.adu),
(InverseVariance, 1 / u.adu ** 2)])
def test_assigning_uncertainty_to_parent_gives_correct_unit(NDClass,
UncertClass,
expected_unit):
# Does assigning a unitless uncertainty to an NDData result in the
# expected unit?
ndd = NDClass([1, 1], unit=u.adu)
v = UncertClass([1, 1])
ndd.uncertainty = v
assert v.unit == expected_unit
@pytest.mark.parametrize('NDClass', [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(('UncertClass, expected_unit'),
[(VarianceUncertainty, u.adu ** 2),
(StdDevUncertainty, u.adu),
(InverseVariance, 1 / u.adu ** 2)])
def test_assigning_uncertainty_with_unit_to_parent_with_unit(NDClass,
UncertClass,
expected_unit):
# Does assigning an uncertainty with an appropriate unit to an NDData
# with a unit work?
ndd = NDClass([1, 1], unit=u.adu)
v = UncertClass([1, 1], unit=expected_unit)
ndd.uncertainty = v
assert v.unit == expected_unit
@pytest.mark.parametrize('NDClass', [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(('UncertClass'),
[(VarianceUncertainty),
(StdDevUncertainty),
(InverseVariance)])
def test_assigning_uncertainty_with_bad_unit_to_parent_fails(NDClass,
UncertClass):
    # Assigning an uncertainty with a unit that does not match the data's
    # unit should fail.
ndd = NDClass([1, 1], unit=u.adu)
# Set the unit to something inconsistent with ndd's unit
v = UncertClass([1, 1], unit=u.second)
with pytest.raises(u.UnitConversionError):
ndd.uncertainty = v
@pytest.mark.parametrize('UncertClass', uncertainty_types_with_conversion_support)
def test_self_conversion_via_variance_supported(UncertClass):
uncert = np.arange(1, 11).reshape(2, 5) * u.adu
start_uncert = UncertClass(uncert)
final_uncert = start_uncert.represent_as(UncertClass)
assert_array_equal(start_uncert.array, final_uncert.array)
assert start_uncert.unit == final_uncert.unit
@pytest.mark.parametrize(
'UncertClass,to_variance_func',
zip(uncertainty_types_with_conversion_support,
(lambda x: x ** 2, lambda x: x, lambda x: 1 / x))
)
def test_conversion_to_from_variance_supported(UncertClass, to_variance_func):
uncert = np.arange(1, 11).reshape(2, 5) * u.adu
start_uncert = UncertClass(uncert)
var_uncert = start_uncert.represent_as(VarianceUncertainty)
final_uncert = var_uncert.represent_as(UncertClass)
assert_allclose(to_variance_func(start_uncert.array), var_uncert.array)
assert_array_equal(start_uncert.array, final_uncert.array)
assert start_uncert.unit == final_uncert.unit
@pytest.mark.parametrize('UncertClass', uncertainty_types_without_conversion_support)
def test_self_conversion_via_variance_not_supported(UncertClass):
uncert = np.arange(1, 11).reshape(2, 5) * u.adu
start_uncert = UncertClass(uncert)
with pytest.raises(TypeError):
final_uncert = start_uncert.represent_as(UncertClass)
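# Illustrative sketch, not part of the original tests: converting between
# representations with ``represent_as``, as exercised above. The squared
# unit on the variance is assumed from the std -> var conversion.
def _example_uncertainty_conversion():
    std = StdDevUncertainty([1.0, 2.0, 3.0], unit=u.adu)
    var = std.represent_as(VarianceUncertainty)
    assert_allclose(var.array, [1.0, 4.0, 9.0])
    assert var.unit == u.adu ** 2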
|
08623c197581b569da8595f12ae408e598e5ea52920df9acdec3ad7de845eb86 | # This file includes the definition of a mix-in class that provides the low-
# and high-level WCS API to the astropy.wcs.WCS object. We keep this code
# isolated in this mix-in class to avoid making the main wcs.py file too
# long.
import warnings
import numpy as np
from astropy import units as u
from astropy.coordinates import SpectralCoord, Galactic, ICRS
from astropy.coordinates.spectral_coordinate import update_differentials_to_match, attach_zero_velocities
from astropy.utils.exceptions import AstropyUserWarning
from astropy.constants import c
from .low_level_api import BaseLowLevelWCS
from .high_level_api import HighLevelWCSMixin
from .wrappers import SlicedLowLevelWCS
__all__ = ['custom_ctype_to_ucd_mapping', 'SlicedFITSWCS', 'FITSWCSAPIMixin']
C_SI = c.si.value
VELOCITY_FRAMES = {
'GEOCENT': 'gcrs',
'BARYCENT': 'icrs',
'HELIOCENT': 'hcrs',
'LSRK': 'lsrk',
'LSRD': 'lsrd'
}
# The spectra velocity frames below are needed for FITS spectral WCS
# (see Greisen 06 table 12) but aren't yet defined as real
# astropy.coordinates frames, so we instead define them here as instances
# of existing coordinate frames with offset velocities. In future we should
# make these real frames so that users can more easily recognize these
# velocity frames when used in SpectralCoord.
# This frame is defined as a velocity of 220 km/s in the
# direction of l=90, b=0. The rotation velocity is defined
# in:
#
# Kerr and Lynden-Bell 1986, Review of galactic constants.
#
# NOTE: this may differ from the assumptions of galcen_v_sun
# in the Galactocentric frame - the value used here is
# the one adopted by the WCS standard for spectral
# transformations.
VELOCITY_FRAMES['GALACTOC'] = Galactic(u=0 * u.km, v=0 * u.km, w=0 * u.km,
U=0 * u.km / u.s, V=-220 * u.km / u.s, W=0 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian')
# This frame is defined as a velocity of 300 km/s in the
# direction of l=90, b=0. This is defined in:
#
# Transactions of the IAU Vol. XVI B Proceedings of the
# 16th General Assembly, Reports of Meetings of Commissions:
# Comptes Rendus Des Séances Des Commissions, Commission 28,
# p201.
#
# Note that these values differ from those used by CASA
# (308 km/s towards l=105, b=-7) but we use the above values
# since these are the ones defined in Greisen et al (2006).
VELOCITY_FRAMES['LOCALGRP'] = Galactic(u=0 * u.km, v=0 * u.km, w=0 * u.km,
U=0 * u.km / u.s, V=-300 * u.km / u.s, W=0 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian')
# This frame is defined as a velocity of 368 km/s in the
# direction of l=263.85, b=48.25. This is defined in:
#
# Bennett et al. (2003), First-Year Wilkinson Microwave
# Anisotropy Probe (WMAP) Observations: Preliminary Maps
# and Basic Results
#
# Note that in that paper, the dipole is expressed as a
# temperature (T=3.346 +/- 0.017mK)
VELOCITY_FRAMES['CMBDIPOL'] = Galactic(l=263.85 * u.deg, b=48.25 * u.deg, distance=0 * u.km,
radial_velocity=-(3.346e-3 / 2.725 * c).to(u.km/u.s))
# Mapping from CTYPE axis name to UCD1
CTYPE_TO_UCD1 = {
# Celestial coordinates
'RA': 'pos.eq.ra',
'DEC': 'pos.eq.dec',
'GLON': 'pos.galactic.lon',
'GLAT': 'pos.galactic.lat',
'ELON': 'pos.ecliptic.lon',
'ELAT': 'pos.ecliptic.lat',
'TLON': 'pos.bodyrc.lon',
'TLAT': 'pos.bodyrc.lat',
'HPLT': 'custom:pos.helioprojective.lat',
'HPLN': 'custom:pos.helioprojective.lon',
'HPRZ': 'custom:pos.helioprojective.z',
'HGLN': 'custom:pos.heliographic.stonyhurst.lon',
'HGLT': 'custom:pos.heliographic.stonyhurst.lat',
'CRLN': 'custom:pos.heliographic.carrington.lon',
'CRLT': 'custom:pos.heliographic.carrington.lat',
'SOLX': 'custom:pos.heliocentric.x',
'SOLY': 'custom:pos.heliocentric.y',
'SOLZ': 'custom:pos.heliocentric.z',
# Spectral coordinates (WCS paper 3)
'FREQ': 'em.freq', # Frequency
'ENER': 'em.energy', # Energy
'WAVN': 'em.wavenumber', # Wavenumber
'WAVE': 'em.wl', # Vacuum wavelength
'VRAD': 'spect.dopplerVeloc.radio', # Radio velocity
'VOPT': 'spect.dopplerVeloc.opt', # Optical velocity
'ZOPT': 'src.redshift', # Redshift
'AWAV': 'em.wl', # Air wavelength
'VELO': 'spect.dopplerVeloc', # Apparent radial velocity
    'BETA': 'custom:spect.dopplerVeloc.beta',  # Beta factor (v/c)
'STOKES': 'phys.polarization.stokes', # STOKES parameters
# Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
'TIME': 'time',
'TAI': 'time',
'TT': 'time',
'TDT': 'time',
'ET': 'time',
'IAT': 'time',
'UT1': 'time',
'UTC': 'time',
'GMT': 'time',
'GPS': 'time',
'TCG': 'time',
'TCB': 'time',
'TDB': 'time',
'LOCAL': 'time',
# Distance coordinates
'DIST': 'pos.distance',
'DSUN': 'custom:pos.distance.sunToObserver'
# UT() and TT() are handled separately in world_axis_physical_types
}
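# Illustrative sketch, not part of the original module: lookups strip the
# algorithm suffix first, mirroring world_axis_physical_types below, e.g.
# 'RA---TAN' -> 'RA' -> 'pos.eq.ra'.
def _example_ctype_lookup(ctype='RA---TAN'):
    ctype_name = ctype.split('-')[0]
    return CTYPE_TO_UCD1.get(ctype_name.upper())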
# Keep a list of additional custom mappings that have been registered. This
# is kept as a list in case nested context managers are used
CTYPE_TO_UCD1_CUSTOM = []
class custom_ctype_to_ucd_mapping:
"""
A context manager that makes it possible to temporarily add new CTYPE to
UCD1+ mapping used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.
Parameters
----------
mapping : dict
A dictionary mapping a CTYPE value to a UCD1+ value
Examples
--------
Consider a WCS with the following CTYPE::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=1)
>>> wcs.wcs.ctype = ['SPAM']
By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,
but this can be overridden::
>>> wcs.world_axis_physical_types
[None]
>>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
... wcs.world_axis_physical_types
['food.spam']
"""
def __init__(self, mapping):
CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)
self.mapping = mapping
def __enter__(self):
pass
def __exit__(self, type, value, tb):
CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)
class SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):
pass
class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API
"""
@property
def pixel_n_dim(self):
return self.naxis
@property
def world_n_dim(self):
return len(self.wcs.ctype)
@property
def array_shape(self):
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@array_shape.setter
def array_shape(self, value):
if value is None:
self.pixel_shape = None
else:
self.pixel_shape = value[::-1]
@property
def pixel_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the "
"shape {}.".format(self.naxis, len(value)))
self._naxis = list(value)
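# Usage sketch for the two shape properties (assuming a 2D WCS ``w``):
# ``pixel_shape`` follows the FITS/pixel axis order while ``array_shape``
# is the reversed, numpy-style order, so the two setters mirror each other:
#
#     >>> w.pixel_shape = (1000, 500)
#     >>> w.array_shape
#     (500, 1000)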
@property
def pixel_bounds(self):
return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the number of "
"pixel bounds {}.".format(self.naxis, len(value)))
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
# TODO: need to support e.g. TT(TAI)
for ctype in self.wcs.ctype:
if ctype.upper().startswith(('UT(', 'TT(')):
types.append('time')
else:
ctype_name = ctype.split('-')[0]
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if ctype_name in custom_mapping:
types.append(custom_mapping[ctype_name])
break
else:
types.append(CTYPE_TO_UCD1.get(ctype_name.upper(), None))
return types
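# Worked example of the lookup above (illustrative, using entries from
# CTYPE_TO_UCD1): for ctype values ['RA---TAN', 'DEC--TAN', 'FREQ'], the
# leading token of each CTYPE ('RA', 'DEC', 'FREQ') is looked up, giving
# ['pos.eq.ra', 'pos.eq.dec', 'em.freq'].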
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ''
elif isinstance(unit, u.Unit):
unit = unit.to_string(format='vounit')
else:
try:
unit = u.Unit(unit).to_string(format='vounit')
except u.UnitsError:
unit = ''
units.append(unit)
return units
@property
def world_axis_names(self):
return list(self.wcs.cname)
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix is given by which elements of the PC matrix are non-zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these become correlated with each other by the spherical projection. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
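# Illustrative example (assumed values, not from a real header): for a 3D
# cube with axes RA, Dec, FREQ and a diagonal PC matrix, the loop above
# couples the two celestial rows to each other's pixel axes, giving
#
#     [[ True,  True, False],
#      [ True,  True, False],
#      [False, False,  True]]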
def pixel_to_world_values(self, *pixel_arrays):
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def world_to_pixel_values(self, *world_arrays):
# avoid circular import
from astropy.wcs.wcs import NoConvergence
try:
pixel = self.all_world2pix(*world_arrays, 0)
except NoConvergence as e:
warnings.warn(str(e))
# use best_solution contained in the exception and format the same
# way as all_world2pix does (using _array_converter)
pixel = self._array_converter(lambda *args: e.best_solution,
'input', *world_arrays, 0)
return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)
@property
def world_axis_object_components(self):
return self._get_components_and_classes()[0]
@property
def world_axis_object_classes(self):
return self._get_components_and_classes()[1]
@property
def serialized_classes(self):
return False
def _get_components_and_classes(self):
# The aim of this function is to return whatever is needed for
# world_axis_object_components and world_axis_object_classes. It's easier
# to figure it out in one go and then return the values and let the
# properties return part of it.
# Since this method might get called quite a few times, we need to cache
# it. We start off by defining a hash based on the attributes of the
# WCS that matter here (we can't just use the WCS object as a hash since
# it is mutable)
wcs_hash = (self.naxis,
list(self.wcs.ctype),
list(self.wcs.cunit),
self.wcs.radesys,
self.wcs.specsys,
self.wcs.equinox,
self.wcs.dateobs,
self.wcs.lng,
self.wcs.lat)
# If the cache is present, we need to check that the 'hash' matches.
if getattr(self, '_components_and_classes_cache', None) is not None:
cache = self._components_and_classes_cache
if cache[0] == wcs_hash:
return cache[1]
else:
self._components_and_classes_cache = None
# Avoid circular imports by importing here
from astropy.wcs.utils import wcs_to_celestial_frame
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.time import Time, TimeDelta
components = [None] * self.naxis
classes = {}
# Let's start off by checking whether the WCS has a pair of celestial
# components
if self.has_celestial:
try:
celestial_frame = wcs_to_celestial_frame(self)
except ValueError:
# Some WCSes, e.g. solar, can be recognized by WCSLIB as being
# celestial but we don't necessarily have frames for them.
celestial_frame = None
else:
kwargs = {}
kwargs['frame'] = celestial_frame
kwargs['unit'] = u.deg
classes['celestial'] = (SkyCoord, (), kwargs)
components[self.wcs.lng] = ('celestial', 0, 'spherical.lon.degree')
components[self.wcs.lat] = ('celestial', 1, 'spherical.lat.degree')
# Next, we check for spectral components
if self.has_spectral:
# Find index of spectral coordinate
ispec = self.wcs.spec
ctype = self.wcs.ctype[ispec][:4]
ctype = ctype.upper()
kwargs = {}
# Determine observer location and velocity
# TODO: determine how WCS standard would deal with observer on a
# spacecraft far from earth. For now assume the obsgeo parameters,
# if present, give the geocentric observer location.
if np.isnan(self.wcs.obsgeo[0]):
observer = None
else:
earth_location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
obstime = Time(self.wcs.mjdobs, format='mjd', scale='utc',
location=earth_location)
observer_location = SkyCoord(earth_location.get_itrs(obstime=obstime))
if self.wcs.specsys in VELOCITY_FRAMES:
frame = VELOCITY_FRAMES[self.wcs.specsys]
observer = observer_location.transform_to(frame)
if isinstance(frame, str):
observer = attach_zero_velocities(observer)
else:
observer = update_differentials_to_match(observer_location,
VELOCITY_FRAMES[self.wcs.specsys],
preserve_observer_frame=True)
elif self.wcs.specsys == 'TOPOCENT':
observer = attach_zero_velocities(observer_location)
else:
raise NotImplementedError(f'SPECSYS={self.wcs.specsys} not yet supported')
# Determine target
# This is trickier. In principle the target for each pixel is the
# celestial coordinates of the pixel, but we then need to be very
# careful about SSYSOBS which is tricky. For now, we set the
# target using the reference celestial coordinate in the WCS (if
# any).
if self.has_celestial and celestial_frame is not None:
# NOTE: celestial_frame was defined higher up
# NOTE: we set the distance explicitly to avoid warnings in SpectralCoord
target = SkyCoord(self.wcs.crval[self.wcs.lng] * self.wcs.cunit[self.wcs.lng],
self.wcs.crval[self.wcs.lat] * self.wcs.cunit[self.wcs.lat],
frame=celestial_frame,
distance=1000 * u.kpc)
target = attach_zero_velocities(target)
else:
target = None
# SpectralCoord does not work properly if either observer or target
# are not convertible to ICRS, so if this is the case, we (for now)
# drop the observer and target from the SpectralCoord and warn the
# user.
if observer is not None:
try:
observer.transform_to(ICRS())
except Exception:
warnings.warn('observer cannot be converted to ICRS, so will '
'not be set on SpectralCoord', AstropyUserWarning)
observer = None
if target is not None:
try:
target.transform_to(ICRS())
except Exception:
warnings.warn('target cannot be converted to ICRS, so will '
'not be set on SpectralCoord', AstropyUserWarning)
target = None
# NOTE: below we include Quantity in classes['spectral'] instead
# of SpectralCoord - this is because we want to also be able to
# accept plain quantities.
if ctype == 'ZOPT':
def spectralcoord_from_redshift(redshift):
if isinstance(redshift, SpectralCoord):
return redshift
return SpectralCoord((redshift + 1) * self.wcs.restwav,
unit=u.m, observer=observer, target=target)
def redshift_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (observer is None
or spectralcoord.observer is None
or spectralcoord.target is None):
if observer is None:
msg = 'No observer defined on WCS'
elif spectralcoord.observer is None:
msg = 'No observer defined on SpectralCoord'
else:
msg = 'No target defined on SpectralCoord'
warnings.warn(f'{msg}, SpectralCoord '
'will be converted without any velocity '
'frame change', AstropyUserWarning)
return spectralcoord.to_value(u.m) / self.wcs.restwav - 1.
else:
return spectralcoord.with_observer_stationary_relative_to(observer).to_value(u.m) / self.wcs.restwav - 1.
classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_redshift)
components[self.wcs.spec] = ('spectral', 0, redshift_from_spectralcoord)
elif ctype == 'BETA':
def spectralcoord_from_beta(beta):
if isinstance(beta, SpectralCoord):
return beta
return SpectralCoord(beta * C_SI,
unit=u.m / u.s,
doppler_convention='relativistic',
doppler_rest=self.wcs.restwav * u.m,
observer=observer, target=target)
def beta_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
doppler_equiv = u.doppler_relativistic(self.wcs.restwav * u.m)
if (observer is None
or spectralcoord.observer is None
or spectralcoord.target is None):
if observer is None:
msg = 'No observer defined on WCS'
elif spectralcoord.observer is None:
msg = 'No observer defined on SpectralCoord'
else:
msg = 'No target defined on SpectralCoord'
warnings.warn(f'{msg}, SpectralCoord '
'will be converted without any velocity '
'frame change', AstropyUserWarning)
return spectralcoord.to_value(u.m / u.s, doppler_equiv) / C_SI
else:
return spectralcoord.with_observer_stationary_relative_to(observer).to_value(u.m / u.s, doppler_equiv) / C_SI
classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_beta)
components[self.wcs.spec] = ('spectral', 0, beta_from_spectralcoord)
else:
kwargs['unit'] = self.wcs.cunit[ispec]
if self.wcs.restfrq > 0:
if ctype == 'VELO':
kwargs['doppler_convention'] = 'relativistic'
kwargs['doppler_rest'] = self.wcs.restfrq * u.Hz
elif ctype == 'VRAD':
kwargs['doppler_convention'] = 'radio'
kwargs['doppler_rest'] = self.wcs.restfrq * u.Hz
elif ctype == 'VOPT':
kwargs['doppler_convention'] = 'optical'
kwargs['doppler_rest'] = self.wcs.restwav * u.m
def spectralcoord_from_value(value):
if isinstance(value, SpectralCoord):
return value
return SpectralCoord(value, observer=observer, target=target, **kwargs)
def value_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (observer is None
or spectralcoord.observer is None
or spectralcoord.target is None):
if observer is None:
msg = 'No observer defined on WCS'
elif spectralcoord.observer is None:
msg = 'No observer defined on SpectralCoord'
else:
msg = 'No target defined on SpectralCoord'
warnings.warn(f'{msg}, SpectralCoord '
'will be converted without any velocity '
'frame change', AstropyUserWarning)
return spectralcoord.to_value(**kwargs)
else:
return spectralcoord.with_observer_stationary_relative_to(observer).to_value(**kwargs)
classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_value)
components[self.wcs.spec] = ('spectral', 0, value_from_spectralcoord)
# We can then make sure we correctly return Time objects where appropriate
# (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
if 'time' in self.world_axis_physical_types:
multiple_time = self.world_axis_physical_types.count('time') > 1
for i in range(self.naxis):
if self.world_axis_physical_types[i] == 'time':
if multiple_time:
name = f'time.{i}'
else:
name = 'time'
# Initialize delta
reference_time_delta = None
# Extract time scale
scale = self.wcs.ctype[i].lower()
if scale == 'time':
if self.wcs.timesys:
scale = self.wcs.timesys.lower()
else:
scale = 'utc'
# Drop sub-scales
if '(' in scale:
pos = scale.index('(')
scale, subscale = scale[:pos], scale[pos+1:-1]
warnings.warn(f'Dropping unsupported sub-scale '
f'{subscale.upper()} from scale {scale.upper()}',
UserWarning)
# TODO: consider having GPS as a scale in Time
# For now GPS is not a scale, we approximate this by TAI - 19s
if scale == 'gps':
reference_time_delta = TimeDelta(19, format='sec')
scale = 'tai'
elif scale.upper() in FITS_DEPRECATED_SCALES:
scale = FITS_DEPRECATED_SCALES[scale.upper()]
elif scale not in Time.SCALES:
raise ValueError(f'Unrecognized time CTYPE={self.wcs.ctype[i]}')
# Determine location
trefpos = self.wcs.trefpos.lower()
if trefpos.startswith('topocent'):
# Note that some headers use TOPOCENT instead of TOPOCENTER
if np.any(np.isnan(self.wcs.obsgeo[:3])):
warnings.warn('Missing or incomplete observer location '
'information, setting location in Time to None',
UserWarning)
location = None
else:
location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
elif trefpos == 'geocenter':
location = EarthLocation(0, 0, 0, unit=u.m)
elif trefpos == '':
location = None
else:
# TODO: implement support for more locations when Time supports it
warnings.warn(f"Observation location '{trefpos}' is not "
"supported, setting location in Time to None", UserWarning)
location = None
reference_time = Time(np.nan_to_num(self.wcs.mjdref[0]),
np.nan_to_num(self.wcs.mjdref[1]),
format='mjd', scale=scale,
location=location)
if reference_time_delta is not None:
reference_time = reference_time + reference_time_delta
# Bind the reference time for this axis via a default argument so
# that each closure keeps its own value (avoiding Python's
# late-binding closure behavior when there are multiple time axes).
def time_from_reference_and_offset(offset, reference_time=reference_time):
    if isinstance(offset, Time):
        return offset
    return reference_time + TimeDelta(offset, format='sec')
def offset_from_time_and_reference(time, reference_time=reference_time):
    return (time - reference_time).sec
classes[name] = (Time, (), {}, time_from_reference_and_offset)
components[i] = (name, 0, offset_from_time_and_reference)
# Fallback: for any remaining components that haven't been identified, just
# return Quantity as the class to use
for i in range(self.naxis):
if components[i] is None:
name = self.wcs.ctype[i].split('-')[0].lower()
if name == '':
name = 'world'
while name in classes:
name += "_"
classes[name] = (u.Quantity, (), {'unit': self.wcs.cunit[i]})
components[i] = (name, 0, 'value')
# Keep a cached version of result
self._components_and_classes_cache = wcs_hash, (components, classes)
return components, classes
bec736a6e107d2cddbcd00b6501989dc84bd71e651b00e0a5eb9d698551b5303 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
from contextlib import nullcontext
from datetime import datetime
from packaging.version import Version
import pytest
import numpy as np
from numpy.testing import (
assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_equal)
from astropy import wcs
from astropy.wcs import _wcs # noqa
from astropy import units as u
from astropy.utils.data import (
get_pkg_data_filenames, get_pkg_data_contents, get_pkg_data_filename)
from astropy.utils.misc import NumpyRNGContext
from astropy.utils.exceptions import (
AstropyUserWarning, AstropyWarning, AstropyDeprecationWarning)
from astropy.tests.helper import assert_quantity_allclose
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.nddata import Cutout2D
_WCSLIB_VER = Version(_wcs.__version__)
# NOTE: User can choose to use system wcslib instead of bundled.
def ctx_for_v71_dateref_warnings():
if _WCSLIB_VER >= Version('7.1') and _WCSLIB_VER < Version('7.3'):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set DATE-REF to '1858-11-17' from MJD-REF'\.")
else:
ctx = nullcontext()
return ctx
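# Usage sketch for the helper above: wrap any WCS construction that may emit
# the 'datfix' DATE-REF warning on wcslib 7.1-7.2 so the surrounding test
# passes on all wcslib versions, e.g.
#
#     with ctx_for_v71_dateref_warnings():
#         w = wcs.WCS({})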
class TestMaps:
def setup(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames(
"data/maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
assert len(self._file_list) == n_data_files, (
"test_maps has the wrong number of data files: found {}, "
"expected {}".format(len(self._file_list), n_data_files))
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "maps", filename), encoding='binary')
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
class TestSpectra:
def setup(self):
self._file_list = list(get_pkg_data_filenames("data/spectra",
pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 6
assert len(self._file_list) == n_data_files, (
"test_spectra has the wrong number of data files: found {}, "
"expected {}".format(len(self._file_list), n_data_files))
def test_spectra(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "spectra", filename), encoding='binary')
# finally run the test.
if _WCSLIB_VER >= Version('7.4'):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set MJD-OBS to 53925\.853472 from DATE-OBS'\.") # noqa
else:
ctx = nullcontext()
with ctx:
all_wcs = wcs.find_all_wcs(header)
assert len(all_wcs) == 9
def test_fixes():
"""
From github issue #36
"""
header = get_pkg_data_contents('data/nonstandard_units.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning) as w:
wcs.WCS(header, translate_units='dhs')
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
assert len(w) == 3
assert "'datfix' made the change 'Success'." in str(w.pop().message)
else:
assert len(w) == 2
first_wmsg = str(w[0].message)
assert 'unitfix' in first_wmsg and 'Hz' in first_wmsg and 'M/S' in first_wmsg
assert 'plane angle' in str(w[1].message) and 'm/s' in str(w[1].message)
# Ignore "PV2_2 = 0.209028857410973 invalid keyvalue" warning seen on Windows.
@pytest.mark.filterwarnings(r'ignore:PV2_2')
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents(
'data/outside_sky.hdr', encoding='binary')
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100., 500.]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200., 200.]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000., 1000.]], 0)))
def test_pix2world():
"""
From github issue #1463
"""
# TODO: write this to test the expected output behavior of pix2world;
# currently this just makes sure it doesn't error out in unexpected ways,
# and compares the `wcs.pc` and `result` values against reference values
filename = get_pkg_data_filename('data/sip2.fits')
with pytest.warns(wcs.FITSFixedWarning) as caught_warnings:
# Constructing the WCS raises a warning that is unimportant for
# testing pix2world here:
# FITSFixedWarning(u'The WCS transformation has more axes (2) than
# the image it is associated with (0)')
ww = wcs.WCS(filename)
# might as well monitor for changing behavior
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
assert len(caught_warnings) == 2
else:
assert len(caught_warnings) == 1
n = 3
pixels = (np.arange(n) * np.ones((2, n))).T
result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
# Catch #2791
ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)
# assuming that the data of sip2.fits doesn't change
answer = np.array([[0.00024976, 0.00023018],
[0.00023043, -0.00024997]])
assert np.allclose(ww.wcs.pc, answer, atol=1.e-8)
answer = np.array([[202.39265216, 47.17756518],
[202.39335826, 47.17754619],
[202.39406436, 47.1775272]])
assert np.allclose(result, answer, atol=1.e-8, rtol=1.e-10)
def test_load_fits_path():
fits_name = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(fits_name)
def test_dict_init():
"""
Test that WCS can be initialized with a dict-like object
"""
# Dictionary with no actual WCS, returns identity transform
with ctx_for_v71_dateref_warnings():
w = wcs.WCS({})
xp, yp = w.wcs_world2pix(41., 2., 1)
assert_array_almost_equal_nulp(xp, 41., 10)
assert_array_almost_equal_nulp(yp, 2., 10)
# Valid WCS
hdr = {
'CTYPE1': 'GLON-CAR',
'CTYPE2': 'GLAT-CAR',
'CUNIT1': 'deg',
'CUNIT2': 'deg',
'CRPIX1': 1,
'CRPIX2': 1,
'CRVAL1': 40.,
'CRVAL2': 0.,
'CDELT1': -0.1,
'CDELT2': 0.1
}
if _WCSLIB_VER >= Version('7.1'):
hdr['DATEREF'] = '1858-11-17'
if _WCSLIB_VER >= Version('7.4'):
ctx = pytest.warns(
wcs.wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set MJDREF to 0\.000000 from DATEREF'\.")
else:
ctx = nullcontext()
with ctx:
w = wcs.WCS(hdr)
xp, yp = w.wcs_world2pix(41., 2., 0)
assert_array_almost_equal_nulp(xp, -10., 10)
assert_array_almost_equal_nulp(yp, 20., 10)
def test_extra_kwarg():
"""
Issue #444
"""
w = wcs.WCS()
with NumpyRNGContext(123456789):
data = np.random.rand(100, 2)
with pytest.raises(TypeError):
w.wcs_pix2world(data, origin=1)
def test_3d_shapes():
"""
Issue #444
"""
w = wcs.WCS(naxis=3)
with NumpyRNGContext(123456789):
data = np.random.rand(100, 3)
result = w.wcs_pix2world(data, 1)
assert result.shape == (100, 3)
result = w.wcs_pix2world(
data[..., 0], data[..., 1], data[..., 2], 1)
assert len(result) == 3
def test_preserve_shape():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((2, 3, 4))
xw, yw = w.wcs_pix2world(x, y, 1)
assert xw.shape == (2, 3, 4)
assert yw.shape == (2, 3, 4)
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_broadcasting():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = 1
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_shape_mismatch():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((3, 2, 4))
with pytest.raises(ValueError) as exc:
xw, yw = w.wcs_pix2world(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
with pytest.raises(ValueError) as exc:
xp, yp = w.wcs_world2pix(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
# There are some ambiguities that need to be worked around when
# naxis == 1
w = wcs.WCS(naxis=1)
x = np.random.random((42, 1))
xw = w.wcs_pix2world(x, 1)
assert xw.shape == (42, 1)
x = np.random.random((42,))
xw, = w.wcs_pix2world(x, 1)
assert xw.shape == (42,)
def test_invalid_shape():
# Issue #1395
w = wcs.WCS(naxis=2)
xy = np.random.random((2, 3))
with pytest.raises(ValueError) as exc:
w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
xy = np.random.random((2, 1))
with pytest.raises(ValueError) as exc:
w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
def test_warning_about_defunct_keywords():
header = get_pkg_data_contents('data/defunct_keywords.hdr', encoding='binary')
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
n_warn = 5
else:
n_warn = 4
# Make sure the warnings come out every time...
for _ in range(2):
with pytest.warns(wcs.FITSFixedWarning) as w:
wcs.WCS(header)
assert len(w) == n_warn
# 7.4 adds a fifth warning "'datfix' made the change 'Success'."
for item in w[:4]:
assert 'PCi_ja' in str(item.message)
def test_warning_about_defunct_keywords_exception():
header = get_pkg_data_contents('data/defunct_keywords.hdr', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(header)
def test_to_header_string():
hdrstr = (
"WCSAXES = 2 / Number of coordinate axes ",
"CRPIX1 = 0.0 / Pixel coordinate of reference point ",
"CRPIX2 = 0.0 / Pixel coordinate of reference point ",
"CDELT1 = 1.0 / Coordinate increment at reference point ",
"CDELT2 = 1.0 / Coordinate increment at reference point ",
"CRVAL1 = 0.0 / Coordinate value at reference point ",
"CRVAL2 = 0.0 / Coordinate value at reference point ",
"LATPOLE = 90.0 / [deg] Native latitude of celestial pole ",
)
if _WCSLIB_VER >= Version('7.3'):
hdrstr += (
"MJDREF = 0.0 / [d] MJD of fiducial time ",
)
elif _WCSLIB_VER >= Version('7.1'):
hdrstr += (
"DATEREF = '1858-11-17' / ISO-8601 fiducial time ",
"MJDREFI = 0.0 / [d] MJD of fiducial time, integer part ",
"MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part "
)
hdrstr += ("END", )
header_string = ''.join(hdrstr)
w = wcs.WCS()
h0 = fits.Header.fromstring(w.to_header_string().strip())
if 'COMMENT' in h0:
del h0['COMMENT']
if '' in h0:
del h0['']
h1 = fits.Header.fromstring(header_string.strip())
assert dict(h0) == dict(h1)
def test_to_fits():
if _WCSLIB_VER < Version('7.1'):
    nrec = 8
elif _WCSLIB_VER < Version('7.3'):
    nrec = 11
else:
    nrec = 9
w = wcs.WCS()
header = w.to_header()
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert header == wfits[0].header[-nrec:]
def test_to_header_warning():
fits_name = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
x = wcs.WCS(fits_name)
with pytest.warns(AstropyWarning, match='A_ORDER') as w:
x.to_header()
assert len(w) == 1
def test_no_comments_in_header():
w = wcs.WCS()
header = w.to_header()
assert w.wcs.alt not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
assert 'COMMENT' not in header
wkey = 'P'
header = w.to_header(key=wkey)
assert wkey not in header
assert 'COMMENT' not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
def test_find_all_wcs_crash():
"""
Causes a double free without a recent fix in wcslib_wrap.C
"""
with open(get_pkg_data_filename("data/too_many_pv.hdr")) as fd:
header = fd.read()
# We have to set fix=False here, because one of the fixing tasks is to
# remove redundant SCAMP distortion parameters when SIP distortion
# parameters are also present.
with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning):
wcs.find_all_wcs(header, fix=False)
# NOTE: Warning bubbles up from C layer during wcs.validate() and
# is hard to catch, so we just ignore it.
@pytest.mark.filterwarnings("ignore")
def test_validate():
results = wcs.validate(get_pkg_data_filename("data/validate.fits"))
results_txt = sorted(set([x.strip() for x in repr(results).splitlines()]))
if _WCSLIB_VER >= Version('7.6'):
filename = 'data/validate.7.6.txt'
elif _WCSLIB_VER >= Version('7.4'):
filename = 'data/validate.7.4.txt'
elif _WCSLIB_VER >= Version('6.0'):
filename = 'data/validate.6.txt'
elif _WCSLIB_VER >= Version('5.13'):
filename = 'data/validate.5.13.txt'
elif _WCSLIB_VER >= Version('5.0'):
filename = 'data/validate.5.0.txt'
else:
filename = 'data/validate.txt'
with open(get_pkg_data_filename(filename), "r") as fd:
lines = fd.readlines()
assert sorted(set([x.strip() for x in lines])) == results_txt
def test_validate_with_2_wcses():
# From Issue #2053
with pytest.warns(AstropyUserWarning):
results = wcs.validate(get_pkg_data_filename("data/2wcses.hdr"))
assert "WCS key 'A':" in str(results)
def test_crpix_maps_to_crval():
twcs = wcs.WCS(naxis=2)
twcs.wcs.crval = [251.29, 57.58]
twcs.wcs.cdelt = [1, 1]
twcs.wcs.crpix = [507, 507]
twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])
twcs._naxis = [1014, 1014]
twcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)
twcs.wcs.set()
pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.wcs_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.all_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
def test_all_world2pix(fname=None, ext=0,
tolerance=1.0e-4, origin=0,
random_npts=25000,
adaptive=False, maxiter=20,
detect_divergence=True):
"""Test all_world2pix, iterative inverse of all_pix2world"""
# Open test FITS file:
if fname is None:
fname = get_pkg_data_filename('data/j94f05bgq_flt.fits')
ext = ('SCI', 1)
if not os.path.isfile(fname):
raise OSError(f"Input file '{fname:s}' to 'test_all_world2pix' not found.")
h = fits.open(fname)
w = wcs.WCS(h[ext].header, h)
h.close()
del h
crpix = w.wcs.crpix
ncoord = crpix.shape[0]
# Assume that CRPIX is at the center of the image and that the image has
# a power-of-2 number of pixels along each axis. Only use the central
# 1/64 for this testing purpose:
naxesi_l = list((7. / 16 * crpix).astype(int))
naxesi_u = list((9. / 16 * crpix).astype(int))
# Generate integer indices of pixels (image grid):
img_pix = np.dstack([i.flatten() for i in
np.meshgrid(*map(range, naxesi_l, naxesi_u))])[0]
# Generate random data (in image coordinates):
with NumpyRNGContext(123456789):
rnd_pix = np.random.rand(random_npts, ncoord)
# Scale random data to cover the central part of the image
mwidth = 2 * (crpix * 1. / 8)
rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix
# Reference pixel coordinates in image coordinate system (CS):
test_pix = np.append(img_pix, rnd_pix, axis=0)
# Reference pixel coordinates in sky CS using forward transformation:
all_world = w.all_pix2world(test_pix, origin)
try:
runtime_begin = datetime.now()
# Apply the inverse iterative process to pixels in world coordinates
# to recover the pixel coordinates in image space.
all_pix = w.all_world2pix(
all_world, origin, tolerance=tolerance, adaptive=adaptive,
maxiter=maxiter, detect_divergence=detect_divergence)
runtime_end = datetime.now()
except wcs.wcs.NoConvergence as e:
runtime_end = datetime.now()
ndiv = 0
if e.divergent is not None:
ndiv = e.divergent.shape[0]
print(f"There are {ndiv} diverging solutions.")
print(f"Indices of diverging solutions:\n{e.divergent}")
print(f"Diverging solutions:\n{e.best_solution[e.divergent]}\n")
print("Mean radius of the diverging solutions: {}"
.format(np.mean(
np.linalg.norm(e.best_solution[e.divergent], axis=1))))
print("Mean accuracy of the diverging solutions: {}\n"
.format(np.mean(
np.linalg.norm(e.accuracy[e.divergent], axis=1))))
else:
print("There are no diverging solutions.")
nslow = 0
if e.slow_conv is not None:
nslow = e.slow_conv.shape[0]
print(f"There are {nslow} slowly converging solutions.")
print(f"Indices of slowly converging solutions:\n{e.slow_conv}")
print(f"Slowly converging solutions:\n{e.best_solution[e.slow_conv]}\n")
else:
print("There are no slowly converging solutions.\n")
print("There are {} converged solutions."
.format(e.best_solution.shape[0] - ndiv - nslow))
print(f"Best solutions (all points):\n{e.best_solution}")
print(f"Accuracy:\n{e.accuracy}\n")
print("\nFinished running 'test_all_world2pix' with errors.\n"
"ERROR: {}\nRun time: {}\n"
.format(e.args[0], runtime_end - runtime_begin))
raise e
# Compute differences between reference pixel coordinates and
# pixel coordinates (in image space) recovered from reference
# pixels in world coordinates:
errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
meanerr = np.mean(errors)
maxerr = np.amax(errors)
print("\nFinished running 'test_all_world2pix'.\n"
"Mean error = {:e} (Max error = {:e})\n"
"Run time: {}\n"
.format(meanerr, maxerr, runtime_end - runtime_begin))
assert maxerr < 2.0 * tolerance
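# Beyond its use as a pytest test, the function above can be called directly
# as an accuracy benchmark with custom settings; a hedged sketch (the file
# name below is hypothetical):
#
#     test_all_world2pix(fname='some_image_flt.fits', ext=('SCI', 1),
#                        tolerance=1.0e-5, adaptive=True)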
def test_scamp_sip_distortion_parameters():
"""
Test parsing of WCS parameters with redundant SIP and SCAMP distortion
parameters.
"""
header = get_pkg_data_contents('data/validate.fits', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(header)
# Just check that this doesn't raise an exception.
w.all_pix2world(0, 0, 0)
def test_fixes2():
"""
From github issue #1854
"""
header = get_pkg_data_contents(
'data/nonstandard_units.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
wcs.WCS(header, fix=False)
def test_unit_normalization():
"""
From github issue #1918
"""
header = get_pkg_data_contents(
'data/unit.hdr', encoding='binary')
w = wcs.WCS(header)
assert w.wcs.cunit[2] == 'm/s'
def test_footprint_to_file(tmpdir):
"""
From github issue #1912
"""
# Arbitrary keywords from real data
hdr = {'CTYPE1': 'RA---ZPN', 'CRUNIT1': 'deg',
'CRPIX1': -3.3495999e+02, 'CRVAL1': 3.185790700000e+02,
'CTYPE2': 'DEC--ZPN', 'CRUNIT2': 'deg',
'CRPIX2': 3.0453999e+03, 'CRVAL2': 4.388538000000e+01,
'PV2_1': 1., 'PV2_3': 220., 'NAXIS1': 2048, 'NAXIS2': 1024}
w = wcs.WCS(hdr)
testfile = str(tmpdir.join('test.txt'))
w.footprint_to_file(testfile)
with open(testfile, 'r') as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'ICRS\n'
assert 'color=green' in lines[3]
w.footprint_to_file(testfile, coordsys='FK5', color='red')
with open(testfile, 'r') as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'FK5\n'
assert 'color=red' in lines[3]
with pytest.raises(ValueError):
w.footprint_to_file(testfile, coordsys='FOO')
del hdr['NAXIS1']
del hdr['NAXIS2']
w = wcs.WCS(hdr)
with pytest.warns(AstropyUserWarning):
w.footprint_to_file(testfile)
# Ignore the FITSFixedWarning reporting that keyrecords following the END
# keyrecord were ignored, which comes from src/astropy_wcs.c. Only a blind
# catch like this seems to work when pytest warnings are turned into exceptions.
@pytest.mark.filterwarnings('ignore')
def test_validate_faulty_wcs():
"""
From github issue #2053
"""
h = fits.Header()
# Illegal WCS:
h['RADESYSA'] = 'ICRS'
h['PV2_1'] = 1.0
hdu = fits.PrimaryHDU([[0]], header=h)
hdulist = fits.HDUList([hdu])
# Check that this doesn't raise a NameError exception
wcs.validate(hdulist)
def test_error_message():
header = get_pkg_data_contents(
'data/invalid_header.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
# Both lines are in here, because 0.4 calls .set within WCS.__init__,
# whereas 0.3 and earlier did not.
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(header, _do_set=False)
w.all_pix2world([[536.0, 894.0]], 0)
def test_out_of_bounds():
# See #2107
header = get_pkg_data_contents('data/zpn-hole.hdr', encoding='binary')
w = wcs.WCS(header)
ra, dec = w.wcs_pix2world(110, 110, 0)
assert np.isnan(ra)
assert np.isnan(dec)
ra, dec = w.wcs_pix2world(0, 0, 0)
assert not np.isnan(ra)
assert not np.isnan(dec)
def test_calc_footprint_1():
fits = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array([[202.39314493, 47.17753352],
[202.71885939, 46.94630488],
[202.94631893, 47.15855022],
[202.72053428, 47.37893142]])
footprint = w.calc_footprint(axes=axes)
assert_allclose(footprint, ref)
def test_calc_footprint_2():
""" Test calc_footprint without distortion. """
fits = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array([[202.39265216, 47.17756518],
[202.7469062, 46.91483312],
[203.11487481, 47.14359319],
[202.76092671, 47.40745948]])
footprint = w.calc_footprint(axes=axes, undistort=False)
assert_allclose(footprint, ref)
def test_calc_footprint_3():
""" Test calc_footprint with corner of the pixel."""
w = wcs.WCS()
w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
w.wcs.crpix = [1.5, 5.5]
w.wcs.cdelt = [-0.1, 0.1]
axes = (2, 10)
ref = np.array([[0.1, -0.5],
[0.1, 0.5],
[359.9, 0.5],
[359.9, -0.5]])
footprint = w.calc_footprint(axes=axes, undistort=False, center=False)
assert_allclose(footprint, ref)
def test_sip():
# See #2107
header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')
w = wcs.WCS(header)
x0, y0 = w.sip_pix2foc(200, 200, 0)
assert_allclose(72, x0, 1e-3)
assert_allclose(72, y0, 1e-3)
x1, y1 = w.sip_foc2pix(x0, y0, 0)
assert_allclose(200, x1, 1e-3)
assert_allclose(200, y1, 1e-3)
def test_sub_3d_with_sip():
# See #10527
header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')
header = fits.Header.fromstring(header)
header['NAXIS'] = 3
header.set('NAXIS3', 64, after=header.index('NAXIS2'))
w = wcs.WCS(header, naxis=2)
assert w.naxis == 2
def test_printwcs(capsys):
"""
Just make sure that it runs
"""
h = get_pkg_data_contents(
'data/spectra/orion-freq-1.hdr', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert 'WCS Keywords' in captured.out
h = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert 'WCS Keywords' in captured.out
def test_invalid_spherical():
header = """
SIMPLE = T / conforms to FITS standard
BITPIX = 8 / array data type
WCSAXES = 2 / no comment
CTYPE1 = 'RA---TAN' / TAN (gnomic) projection
CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection
EQUINOX = 2000.0 / Equatorial coordinates definition (yr)
LONPOLE = 180.0 / no comment
LATPOLE = 0.0 / no comment
CRVAL1 = 16.0531567459 / RA of reference point
CRVAL2 = 23.1148929108 / DEC of reference point
CRPIX1 = 2129 / X reference pixel
CRPIX2 = 1417 / Y reference pixel
CUNIT1 = 'deg ' / X pixel scale units
CUNIT2 = 'deg ' / Y pixel scale units
CD1_1 = -0.00912247310646 / Transformation matrix
CD1_2 = -0.00250608809647 / no comment
CD2_1 = 0.00250608809647 / no comment
CD2_2 = -0.00912247310646 / no comment
IMAGEW = 4256 / Image width, in pixels.
IMAGEH = 2832 / Image height, in pixels.
"""
f = io.StringIO(header)
header = fits.Header.fromtextfile(f)
w = wcs.WCS(header)
x, y = w.wcs_world2pix(211, -26, 0)
assert np.isnan(x) and np.isnan(y)
def test_no_iteration():
# Regression test for #3066
w = wcs.WCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'WCS' object is not iterable"
class NewWCS(wcs.WCS):
pass
w = NewWCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'NewWCS' object is not iterable"
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_sip_tpv_agreement():
sip_header = get_pkg_data_contents(
os.path.join("data", "siponly.hdr"), encoding='binary')
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w_sip = wcs.WCS(sip_header)
w_tpv = wcs.WCS(tpv_header)
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv.all_pix2world([w_tpv.wcs.crpix], 1))
w_sip2 = wcs.WCS(w_sip.to_header())
w_tpv2 = wcs.WCS(w_tpv.to_header())
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_sip2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_tpv.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1))
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_tpv_copy():
# See #3904
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w_tpv = wcs.WCS(tpv_header)
ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)
assert ra[0] != ra[1] and ra[1] != ra[2]
assert dec[0] != dec[1] and dec[1] != dec[2]
def test_hst_wcs():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
with fits.open(path) as hdulist:
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist)
# Check pixel scale and area
assert_quantity_allclose(
w.proj_plane_pixel_scales(), [1.38484378e-05, 1.39758488e-05] * u.deg)
assert_quantity_allclose(
w.proj_plane_pixel_area(), 1.93085492e-10 * (u.deg * u.deg))
# Exercise the main transformation functions, mainly just for
# coverage
w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0)
w.det2im([0, 100, 200], [0, -100, 200], 0)
w.cpdis1 = w.cpdis1
w.cpdis2 = w.cpdis2
w.det2im1 = w.det2im1
w.det2im2 = w.det2im2
w.sip = w.sip
w.cpdis1.cdelt = w.cpdis1.cdelt
w.cpdis1.crpix = w.cpdis1.crpix
w.cpdis1.crval = w.cpdis1.crval
w.cpdis1.data = w.cpdis1.data
assert w.sip.a_order == 4
assert w.sip.b_order == 4
assert w.sip.ap_order == 0
assert w.sip.bp_order == 0
assert_array_equal(w.sip.crpix, [2048., 1024.])
wcs.WCS(hdulist[1].header, hdulist)
def test_cpdis_comments():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
f = fits.open(path)
w = wcs.WCS(f[1].header, f)
hdr = w.to_fits()[0].header
f.close()
wcscards = list(hdr['CPDIS*'].cards) + list(hdr['DP*'].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
('CPDIS1', 'LOOKUP', 'Prior distortion function type'),
('DP1.EXTVER', 1.0, 'Version number of WCSDVARR extension'),
('DP1.NAXES', 2.0, 'Number of independent variables in CPDIS function'),
('DP1.AXIS.1', 1.0, 'Axis number of the 1st variable in a CPDIS function'),
('DP1.AXIS.2', 2.0, 'Axis number of the 2nd variable in a CPDIS function'),
('CPDIS2', 'LOOKUP', 'Prior distortion function type'),
('DP2.EXTVER', 2.0, 'Version number of WCSDVARR extension'),
('DP2.NAXES', 2.0, 'Number of independent variables in CPDIS function'),
('DP2.AXIS.1', 1.0, 'Axis number of the 1st variable in a CPDIS function'),
('DP2.AXIS.2', 2.0, 'Axis number of the 2nd variable in a CPDIS function'),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_d2im_comments():
path = get_pkg_data_filename("data/ie6d07ujq_wcs.fits")
f = fits.open(path)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header, f)
f.close()
wcscards = list(w.to_fits()[0].header['D2IM*'].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
('D2IMDIS1', 'LOOKUP', 'Detector to image correction type'),
('D2IM1.EXTVER', 1.0, 'Version number of WCSDVARR extension'),
('D2IM1.NAXES', 2.0, 'Number of independent variables in D2IM function'),
('D2IM1.AXIS.1', 1.0, 'Axis number of the 1st variable in a D2IM function'),
('D2IM1.AXIS.2', 2.0, 'Axis number of the 2nd variable in a D2IM function'),
('D2IMDIS2', 'LOOKUP', 'Detector to image correction type'),
('D2IM2.EXTVER', 2.0, 'Version number of WCSDVARR extension'),
('D2IM2.NAXES', 2.0, 'Number of independent variables in D2IM function'),
('D2IM2.AXIS.1', 1.0, 'Axis number of the 1st variable in a D2IM function'),
('D2IM2.AXIS.2', 2.0, 'Axis number of the 2nd variable in a D2IM function'),
# ('D2IMERR1', 0.049, 'Maximum error of D2IM correction for axis 1'),
# ('D2IMERR2', 0.035, 'Maximum error of D2IM correction for axis 2'),
# ('D2IMEXT', 'iref$y7b1516hi_d2i.fits', ''),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_sip_broken():
# This header caused wcslib to segfault because it has a SIP
# specification in a non-default keyword
hdr = get_pkg_data_contents("data/sip-broken.hdr")
wcs.WCS(hdr)
def test_no_truncate_crval():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f'CRVAL{ii + 1}'] == w.wcs.crval[ii]
assert header[f'CDELT{ii + 1}'] == w.wcs.cdelt[ii]
def test_no_truncate_crval_try2():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-5, 1e-5, 1e5]
w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ']
w.wcs.cunit = ['deg', 'deg', 'Hz']
w.wcs.crpix = [1, 1, 1]
w.wcs.restfrq = 2.34e11
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f'CRVAL{ii + 1}'] == w.wcs.crval[ii]
assert header[f'CDELT{ii + 1}'] == w.wcs.cdelt[ii]
def test_no_truncate_crval_p17():
"""
Regression test for https://github.com/astropy/astropy/issues/5162
"""
w = wcs.WCS(naxis=2)
w.wcs.crval = [50.1234567890123456, 50.1234567890123456]
w.wcs.cdelt = [1e-3, 1e-3]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.set()
header = w.to_header()
assert header['CRVAL1'] != w.wcs.crval[0]
assert header['CRVAL2'] != w.wcs.crval[1]
header = w.to_header(relax=wcs.WCSHDO_P17)
assert header['CRVAL1'] == w.wcs.crval[0]
assert header['CRVAL2'] == w.wcs.crval[1]
def test_no_truncate_using_compare():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
This one uses WCS.wcs.compare and some slightly different values
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [2.409303333333E+02, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
w2 = wcs.WCS(w.to_header())
w.wcs.compare(w2.wcs)
def test_passing_ImageHDU():
"""
Passing ImageHDU or PrimaryHDU and comparing it with
wcs initialized from header. For #4493.
"""
path = get_pkg_data_filename('data/validate.fits')
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
wcs_hdu = wcs.WCS(hdulist[0])
wcs_header = wcs.WCS(hdulist[0].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
wcs_hdu = wcs.WCS(hdulist[1])
wcs_header = wcs.WCS(hdulist[1].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
def test_inconsistent_sip():
"""
Test for #4814
"""
hdr = get_pkg_data_contents("data/sip-broken.hdr")
ctx = ctx_for_v71_dateref_warnings()
with ctx:
w = wcs.WCS(hdr)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(relax=None)
# CTYPE should not include "-SIP" if relax is None
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(relax=False)
assert 'A_0_2' not in newhdr
# CTYPE should not include "-SIP" if relax is False
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key="C")
assert 'A_0_2' not in newhdr
# Test writing header with a different key
with ctx:
wnew = wcs.WCS(newhdr, key='C')
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key=" ")
# Test writing a primary WCS to header
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
# Test that "-SIP" is kept into CTYPE if relax=True and
# "-SIP" was in the original header
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
assert 'A_0_2' in newhdr
# Test that SIP coefficients are also written out.
assert wnew.sip is not None
# ######### broken header ###########
# Test that "-SIP" is added to CTYPE if relax=True and
# "-SIP" was not in the original header but SIP coefficients
# are present.
with ctx:
w = wcs.WCS(hdr)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
def test_bounds_check():
"""Test for #4957"""
w = wcs.WCS(naxis=2)
w.wcs.ctype = ["RA---CAR", "DEC--CAR"]
w.wcs.cdelt = [10, 10]
w.wcs.crval = [-90, 90]
w.wcs.crpix = [1, 1]
w.wcs.bounds_check(False, False)
ra, dec = w.wcs_pix2world(300, 0, 0)
assert_allclose(ra, -180)
assert_allclose(dec, -30)
def test_naxis():
w = wcs.WCS(naxis=2)
w.wcs.crval = [1, 1]
w.wcs.cdelt = [0.1, 0.1]
w.wcs.crpix = [1, 1]
w._naxis = [1000, 500]
assert w.pixel_shape == (1000, 500)
assert w.array_shape == (500, 1000)
w.pixel_shape = (99, 59)
assert w._naxis == [99, 59]
w.array_shape = (45, 23)
assert w._naxis == [23, 45]
assert w.pixel_shape == (23, 45)
w.pixel_shape = None
assert w.pixel_bounds is None
def test_sip_with_altkey():
"""
Test that when creating a WCS object using a key, CTYPE with
that key is looked at and not the primary CTYPE.
fix for #5443.
"""
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
# create a header with two WCSs.
h1 = w.to_header(relax=True, key='A')
h2 = w.to_header(relax=False)
h1['CTYPE1A'] = "RA---SIN-SIP"
h1['CTYPE2A'] = "DEC--SIN-SIP"
h1.update(h2)
with ctx_for_v71_dateref_warnings():
w = wcs.WCS(h1, key='A')
assert (w.wcs.ctype == np.array(['RA---SIN-SIP', 'DEC--SIN-SIP'])).all()
def test_to_fits_1():
"""
Test to_fits() with LookupTable distortion.
"""
fits_name = get_pkg_data_filename('data/dist.fits')
with pytest.warns(AstropyDeprecationWarning):
w = wcs.WCS(fits_name)
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert isinstance(wfits[1], fits.ImageHDU)
def test_keyedsip():
"""
Test sip reading with extra key.
"""
hdr_name = get_pkg_data_filename('data/sip-broken.hdr')
header = fits.Header.fromfile(hdr_name)
del header["CRPIX1"]
del header["CRPIX2"]
w = wcs.WCS(header=header, key="A")
assert isinstance(w.sip, wcs.Sip)
assert w.sip.crpix[0] == 2048
assert w.sip.crpix[1] == 1026
def test_zero_size_input():
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
inp = np.zeros((0, 2))
assert_array_equal(inp, w.all_pix2world(inp, 0))
assert_array_equal(inp, w.all_world2pix(inp, 0))
inp = [], [1]
result = w.all_pix2world([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
result = w.all_world2pix([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
def test_scalar_inputs():
"""
Issue #7845
"""
wcsobj = wcs.WCS(naxis=1)
result = wcsobj.all_pix2world(2, 1)
assert_array_equal(result, [np.array(2.)])
assert result[0].shape == ()
result = wcsobj.all_pix2world([2], 1)
assert_array_equal(result, [np.array([2.])])
assert result[0].shape == (1,)
# Ignore RuntimeWarning raised on s390.
@pytest.mark.filterwarnings('ignore:.*invalid value encountered in.*')
def test_footprint_contains():
"""
Test WCS.footprint_contains(skycoord)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
""" # noqa
header = fits.Header.fromstring(header.strip(), '\n')
test_wcs = wcs.WCS(header)
hasCoord = test_wcs.footprint_contains(SkyCoord(254, 2, unit='deg'))
assert hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(240, 2, unit='deg'))
assert not hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(24, 2, unit='deg'))
assert not hasCoord
def test_cunit():
# Initializing WCS
w1 = wcs.WCS(naxis=2)
w2 = wcs.WCS(naxis=2)
w3 = wcs.WCS(naxis=2)
w4 = wcs.WCS(naxis=2)
# Initializing the values of cunit
w1.wcs.cunit = ['deg', 'm/s']
w2.wcs.cunit = ['km/h', 'km/h']
w3.wcs.cunit = ['deg', 'm/s']
w4.wcs.cunit = ['deg', 'deg']
# Equality checking a cunit with itself
assert w1.wcs.cunit == w1.wcs.cunit
assert not w1.wcs.cunit != w1.wcs.cunit
# Equality checking of two different cunit object having same values
assert w1.wcs.cunit == w3.wcs.cunit
assert not w1.wcs.cunit != w3.wcs.cunit
# Equality checking of two different cunit object having the same first unit
# but different second unit (see #9154)
assert not w1.wcs.cunit == w4.wcs.cunit
assert w1.wcs.cunit != w4.wcs.cunit
# Inequality checking of two different cunit object having different values
assert not w1.wcs.cunit == w2.wcs.cunit
assert w1.wcs.cunit != w2.wcs.cunit
# Inequality checking of cunit with a list of literals
assert not w1.wcs.cunit == [1, 2, 3]
assert w1.wcs.cunit != [1, 2, 3]
# Inequality checking with some characters
assert not w1.wcs.cunit == ['a', 'b', 'c']
assert w1.wcs.cunit != ['a', 'b', 'c']
# Ordered comparison is not implemented, so a TypeError is raised
with pytest.raises(TypeError):
w1.wcs.cunit < w2.wcs.cunit
class TestWcsWithTime:
def setup(self):
if _WCSLIB_VER >= Version('7.1'):
fname = get_pkg_data_filename('data/header_with_time_wcslib71.fits')
else:
fname = get_pkg_data_filename('data/header_with_time.fits')
self.header = fits.Header.fromfile(fname)
with pytest.warns(wcs.FITSFixedWarning):
self.w = wcs.WCS(self.header, key='A')
def test_keywords2wcsprm(self):
""" Make sure Wcsprm is populated correctly from the header."""
ctype = [self.header[val] for val in self.header["CTYPE*"]]
crval = [self.header[val] for val in self.header["CRVAL*"]]
crpix = [self.header[val] for val in self.header["CRPIX*"]]
cdelt = [self.header[val] for val in self.header["CDELT*"]]
cunit = [self.header[val] for val in self.header["CUNIT*"]]
assert list(self.w.wcs.ctype) == ctype
time_axis_code = 4000 if _WCSLIB_VER >= Version('7.9') else 0
assert list(self.w.wcs.axis_types) == [2200, 2201, 3300, time_axis_code]
assert_allclose(self.w.wcs.crval, crval)
assert_allclose(self.w.wcs.crpix, crpix)
assert_allclose(self.w.wcs.cdelt, cdelt)
assert list(self.w.wcs.cunit) == cunit
naxis = self.w.naxis
assert naxis == 4
pc = np.zeros((naxis, naxis), dtype=np.float64)
for i in range(1, 5):
for j in range(1, 5):
if i == j:
pc[i-1, j-1] = self.header.get(f'PC{i}_{j}A', 1)
else:
pc[i-1, j-1] = self.header.get(f'PC{i}_{j}A', 0)
assert_allclose(self.w.wcs.pc, pc)
char_keys = ['timesys', 'trefpos', 'trefdir', 'plephem', 'timeunit',
'dateref', 'dateobs', 'datebeg', 'dateavg', 'dateend']
for key in char_keys:
assert getattr(self.w.wcs, key) == self.header.get(key, "")
num_keys = ['mjdref', 'mjdobs', 'mjdbeg', 'mjdend',
'jepoch', 'bepoch', 'tstart', 'tstop', 'xposure',
'timsyer', 'timrder', 'timedel', 'timepixr',
'timeoffs', 'telapse', 'czphs', 'cperi']
for key in num_keys:
if key.upper() == 'MJDREF':
hdrv = [self.header.get('MJDREFIA', np.nan),
self.header.get('MJDREFFA', np.nan)]
else:
hdrv = self.header.get(key, np.nan)
assert_allclose(getattr(self.w.wcs, key), hdrv)
def test_transforms(self):
assert_allclose(self.w.all_pix2world(*self.w.wcs.crpix, 1),
self.w.wcs.crval)
def test_invalid_coordinate_masking():
# Regression test for an issue which caused all coordinates to be set to NaN
# after a transformation rather than just the invalid ones as reported by
# WCSLIB. A specific example of this is that when considering an all-sky
# spectral cube with a spectral axis that is not correlated with the sky
# axes, if transforming pixel coordinates that did not fall 'in' the sky,
# the spectral world value was also masked even though that coordinate
# was valid.
w = wcs.WCS(naxis=3)
w.wcs.ctype = 'VELO_LSR', 'GLON-CAR', 'GLAT-CAR'
w.wcs.crval = -20, 0, 0
w.wcs.crpix = 1, 1441, 241
w.wcs.cdelt = 1.3, -0.125, 0.125
px = [-10, -10, 20]
py = [-10, 10, 20]
pz = [-10, 10, 20]
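    # The first sample (-10, -10, -10) falls outside the valid region of the
    # CAR sky projection, so its sky outputs are NaN, while the uncorrelated
    # VELO_LSR axis must still yield a valid value there.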
wx, wy, wz = w.wcs_pix2world(px, py, pz, 0)
# Before fixing this, wx used to return np.nan for the first element
assert_allclose(wx, [-33, -33, 6])
assert_allclose(wy, [np.nan, 178.75, 177.5])
assert_allclose(wz, [np.nan, -28.75, -27.5])
def test_no_pixel_area():
w = wcs.WCS(naxis=3)
# Pixel area cannot be computed
with pytest.raises(ValueError, match='Pixel area is defined only for 2D pixels'):
w.proj_plane_pixel_area()
# Pixel scales still possible
assert_quantity_allclose(w.proj_plane_pixel_scales(), 1)
def test_distortion_header(tmpdir):
"""
Test that plate distortion model is correctly described by `wcs.to_header()`
and preserved when creating a Cutout2D from the image, writing it to FITS,
and reading it back from the file.
"""
path = get_pkg_data_filename("data/dss.14.29.56-62.41.05.fits.gz")
cen = np.array((50, 50))
siz = np.array((20, 20))
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdulist[0].header)
cut = Cutout2D(hdulist[0].data, position=cen, size=siz, wcs=w)
# This converts the DSS plate solution model with AMD[XY]n coefficients into a
# Template Polynomial Distortion model (TPD.FWD.n coefficients);
# not testing explicitly for the header keywords here.
if _WCSLIB_VER < Version("7.4"):
with pytest.warns(AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"):
w0 = wcs.WCS(w.to_header_string())
with pytest.warns(AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"):
w1 = wcs.WCS(cut.wcs.to_header_string())
if _WCSLIB_VER >= Version("7.1"):
pytest.xfail("TPD coefficients incomplete with WCSLIB >= 7.1 < 7.4")
else:
w0 = wcs.WCS(w.to_header_string())
w1 = wcs.WCS(cut.wcs.to_header_string())
assert w.pixel_to_world(0, 0).separation(w0.pixel_to_world(0, 0)) < 1.e-3 * u.mas
assert w.pixel_to_world(*cen).separation(w0.pixel_to_world(*cen)) < 1.e-3 * u.mas
assert w.pixel_to_world(*cen).separation(w1.pixel_to_world(*(siz / 2))) < 1.e-3 * u.mas
cutfile = str(tmpdir.join('cutout.fits'))
fits.writeto(cutfile, cut.data, cut.wcs.to_header())
with fits.open(cutfile) as hdulist:
w2 = wcs.WCS(hdulist[0].header)
assert w.pixel_to_world(*cen).separation(w2.pixel_to_world(*(siz / 2))) < 1.e-3 * u.mas
def test_pixlist_wcs_colsel():
"""
Test selection of a specific pixel list WCS using ``colsel``. See #11412.
"""
hdr_file = get_pkg_data_filename('data/chandra-pixlist-wcs.hdr')
hdr = fits.Header.fromtextfile(hdr_file)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdr, keysel=['image', 'pixel'], colsel=[11, 12])
assert w.naxis == 2
assert list(w.wcs.ctype) == ['RA---TAN', 'DEC--TAN']
assert np.allclose(w.wcs.crval, [229.38051931869, -58.81108068885])
assert np.allclose(w.wcs.pc, [[1, 0], [0, 1]])
assert np.allclose(w.wcs.cdelt, [-0.00013666666666666, 0.00013666666666666])
assert np.allclose(w.wcs.lonpole, 180.)
@pytest.mark.skipif(
_WCSLIB_VER < Version('7.8'),
reason="TIME axis extraction only works with wcslib 7.8 or later"
)
def test_time_axis_selection():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'TIME']
w.wcs.set()
assert list(w.sub([wcs.WCSSUB_TIME]).wcs.ctype) == ['TIME']
assert (w.wcs_pix2world([[1, 2, 3]], 0)[0, 2] ==
w.sub([wcs.WCSSUB_TIME]).wcs_pix2world([[3]], 0)[0, 0])
@pytest.mark.skipif(
_WCSLIB_VER < Version('7.8'),
reason="TIME axis extraction only works with wcslib 7.8 or later"
)
def test_temporal():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'TIME']
w.wcs.set()
assert w.has_temporal
assert w.sub([wcs.WCSSUB_TIME]).is_temporal
assert (w.wcs_pix2world([[1, 2, 3]], 0)[0, 2] ==
w.temporal.wcs_pix2world([[3]], 0)[0, 0])
def test_swapaxes_same_val_roundtrip():
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
w.wcs.crpix = [32.5, 16.5, 1.]
w.wcs.crval = [5.63, -72.05, 1.]
w.wcs.pc = [[5.9e-06, 1.3e-05, 0.0], [-1.2e-05, 5.0e-06, 0.0], [0.0, 0.0, 1.0]]
w.wcs.cdelt = [1.0, 1.0, 1.0]
w.wcs.set()
axes_order = [3, 2, 1]
axes_order0 = list(i - 1 for i in axes_order)
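    # zero-based indices corresponding to axes_order, used to permute the
    # pixel and world coordinate arrays below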
ws = w.sub(axes_order)
imcoord = np.array([3, 5, 7])
imcoords = imcoord[axes_order0]
val_ref = w.wcs_pix2world([imcoord], 0)[0]
val_swapped = ws.wcs_pix2world([imcoords], 0)[0]
# check original axis and swapped give same results
assert np.allclose(val_ref[axes_order0], val_swapped, rtol=0, atol=1e-8)
# check round-tripping:
assert np.allclose(w.wcs_world2pix([val_ref], 0)[0], imcoord, rtol=0, atol=1e-8)
|
fb3d1eb325c954fc408ecb3788b518d9e5eeb622aa9dc05e5a83caf2fd885b81 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
import pytest
from packaging.version import Version
from astropy import wcs
from astropy.wcs import _wcs # noqa
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from .helper import SimModelTAB
_WCSLIB_VER = Version(_wcs.__version__)
def test_2d_spatial_tab_roundtrip(tab_wcs_2di):
nx, ny = tab_wcs_2di.pixel_shape
# generate "random" test coordinates:
np.random.seed(1)
xy = 0.51 + [nx + 0.99, ny + 0.99] * np.random.random((100, 2))
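    # samples span [0.51, n + 1.5) per axis, so a few points may land outside
    # the -TAB lookup table; non-finite roundtrips are masked out below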
rd = tab_wcs_2di.wcs_pix2world(xy, 1)
xy_roundtripped = tab_wcs_2di.wcs_world2pix(rd, 1)
m = np.logical_and(*(np.isfinite(xy_roundtripped).T))
assert np.allclose(xy[m], xy_roundtripped[m], rtol=0, atol=1e-7)
def test_2d_spatial_tab_vs_model():
nx = 150
ny = 200
model = SimModelTAB(nx=nx, ny=ny)
# generate FITS HDU list:
hdulist = model.hdulist
# create WCS object:
w = wcs.WCS(hdulist[0].header, hdulist)
# generate "random" test coordinates:
np.random.seed(1)
xy = 0.51 + [nx + 0.99, ny + 0.99] * np.random.random((100, 2))
rd = w.wcs_pix2world(xy, 1)
rd_model = model.fwd_eval(xy)
assert np.allclose(rd, rd_model, rtol=0, atol=1e-7)
@pytest.mark.skipif(
_WCSLIB_VER < Version('7.6'),
reason="Only in WCSLIB 7.6 a 1D -TAB axis roundtrips unless first axis"
)
def test_mixed_celest_and_1d_tab_roundtrip():
# Tests WCS roundtripping for the case when there is one -TAB axis and
# this axis is not the first axis. This tests a bug fixed in WCSLIB 7.6.
filename = get_pkg_data_filename('data/tab-time-last-axis.fits')
with fits.open(filename) as hdul:
w = wcs.WCS(hdul[0].header, hdul)
pts = np.random.random((10, 3)) * [[2047, 2047, 127]]
assert np.allclose(pts, w.wcs_world2pix(w.wcs_pix2world(pts, 0), 0))
@pytest.mark.skipif(
_WCSLIB_VER < Version('7.8'),
reason="Requires WCSLIB >= 7.8 for swapping -TAB axes to work."
)
def test_wcstab_swapaxes():
# Crash on deepcopy of swapped -TAB axes reported in #13036.
# Fixed in #13063.
filename = get_pkg_data_filename('data/tab-time-last-axis.fits')
with fits.open(filename) as hdul:
w = wcs.WCS(hdul[0].header, hdul)
w.wcs.ctype[-1] = 'FREQ-TAB'
w.wcs.set()
wswp = w.swapaxes(2, 0)
deepcopy(wswp)
@pytest.mark.skipif(
_WCSLIB_VER < Version('7.8'),
reason="Requires WCSLIB >= 7.8 for swapping -TAB axes to work."
)
@pytest.mark.xfail(
Version('7.8') <= _WCSLIB_VER < Version('7.10'),
reason="Requires WCSLIB >= 7.10 for swapped -TAB axes to produce same results."
)
def test_wcstab_swapaxes_same_val_roundtrip():
filename = get_pkg_data_filename('data/tab-time-last-axis.fits')
axes_order = [3, 2, 1]
axes_order0 = list(i - 1 for i in axes_order)
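    # zero-based indices corresponding to axes_order, used to permute the
    # pixel and world coordinate arrays below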
with fits.open(filename) as hdul:
w = wcs.WCS(hdul[0].header, hdul)
w.wcs.ctype[-1] = 'FREQ-TAB'
w.wcs.set()
ws = w.sub(axes_order)
imcoord = np.array([3, 5, 7])
imcoords = imcoord[axes_order0]
val_ref = w.wcs_pix2world([imcoord], 0)[0]
val_swapped = ws.wcs_pix2world([imcoords], 0)[0]
# check original axis and swapped give same results
assert np.allclose(val_ref[axes_order0], val_swapped, rtol=0, atol=1e-8)
# check round-tripping:
assert np.allclose(w.wcs_world2pix([val_ref], 0)[0], imcoord, rtol=0, atol=1e-8)
|
945c85c9083e256fe856ebdc36c09dc86bdc45653f98af06d2ae635de98ba701 | # Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)
import warnings
from packaging.version import Version
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose
from itertools import product
from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import Quantity
from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.units.core import UnitsWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcs import WCS, FITSFixedWarning, Sip, NoConvergence
from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES
from astropy.wcs._wcs import __version__ as wcsver
from astropy.utils import iers
from astropy.utils.exceptions import AstropyUserWarning
###############################################################################
# The following example is the simplest WCS with default values
###############################################################################
WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]
def test_empty():
wcs = WCS_EMPTY
# Low-level API
assert wcs.pixel_n_dim == 1
assert wcs.world_n_dim == 1
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [None]
assert wcs.world_axis_units == ['']
assert wcs.pixel_axis_names == ['']
assert wcs.world_axis_names == ['']
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [('world', 0, 'value')]
assert wcs.world_axis_object_classes['world'][0] is Quantity
assert wcs.world_axis_object_classes['world'][1] == ()
assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one
assert_allclose(wcs.pixel_to_world_values(29), 29)
assert_allclose(wcs.array_index_to_world_values(29), 29)
assert np.ndim(wcs.pixel_to_world_values(29)) == 0
assert np.ndim(wcs.array_index_to_world_values(29)) == 0
assert_allclose(wcs.world_to_pixel_values(29), 29)
assert_equal(wcs.world_to_array_index_values(29), (29,))
assert np.ndim(wcs.world_to_pixel_values(29)) == 0
assert np.ndim(wcs.world_to_array_index_values(29)) == 0
# High-level API
coord = wcs.pixel_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = wcs.array_index_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = 15 * u.one
x = wcs.world_to_pixel(coord)
assert_allclose(x, 15.)
assert np.ndim(x) == 0
i = wcs.world_to_array_index(coord)
assert_equal(i, 15)
assert np.ndim(i) == 0
###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################
HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1 = RA---TAN
CTYPE2 = DEC--TAN
CRVAL1 = 10
CRVAL2 = 20
CRPIX1 = 30
CRPIX2 = 40
CDELT1 = -0.1
CDELT2 = 0.1
CROTA2 = 0.
CUNIT1 = deg
CUNIT2 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(
HEADER_SIMPLE_CELESTIAL, sep='\n'))
def test_simple_celestial():
wcs = WCS_SIMPLE_CELESTIAL
# Low-level API
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec']
assert wcs.world_axis_units == ['deg', 'deg']
assert wcs.pixel_axis_names == ['', '']
assert wcs.world_axis_names == ['', '']
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))
assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.))
assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))
# High-level API
coord = wcs.pixel_to_world(29, 39)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = wcs.array_index_to_world(39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = SkyCoord(10, 20, unit='deg', frame='icrs')
x, y = wcs.world_to_pixel(coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
i, j = wcs.world_to_array_index(coord)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that if the coordinates are passed in a different frame things still
# work properly
coord_galactic = coord.galactic
x, y = wcs.world_to_pixel(coord_galactic)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
i, j = wcs.world_to_array_index(coord_galactic)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that we can actually index the array
data = np.arange(3600).reshape((60, 60))
coord = SkyCoord(10, 20, unit='deg', frame='icrs')
index = wcs.world_to_array_index(coord)
assert_equal(data[index], 2369)
coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs')
index = wcs.world_to_array_index(coord)
assert_equal(data[index], [2369, 3550])
###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################
HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
def test_spectral_cube():
# Spectral cube with a weird axis ordering
wcs = WCS_SPECTRAL_CUBE
# Low-level API
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True],
[False, True, False],
[True, False, True]])
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['spectral'][0] is Quantity
assert wcs.world_axis_object_classes['spectral'][1] == ()
assert wcs.world_axis_object_classes['spectral'][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
# High-level API
coord, spec = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord, spec = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord = SkyCoord(25, 10, unit='deg', frame='galactic')
spec = 20 * u.Hz
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
x, y, z = wcs.world_to_pixel(coord, spec)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
x, y, z = wcs.world_to_pixel(spec, coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
i, j, k = wcs.world_to_array_index(coord, spec)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
i, j, k = wcs.world_to_array_index(spec, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """
PC2_3 = -0.5
PC3_2 = +0.5
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring(
HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n'))
def test_spectral_cube_nonaligned():
# Make sure that correlation matrix gets adjusted if there are non-identity
# CD matrix terms.
wcs = WCS_SPECTRAL_CUBE_NONALIGNED
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']
assert_equal(wcs.axis_correlation_matrix, [[True, True, True],
[False, True, True],
[True, True, True]])
# NOTE: we check world_axis_object_components and world_axis_object_classes
# again here because in the past this failed when non-aligned axes were
# present, so this serves as a regression test.
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['spectral'][0] is Quantity
assert wcs.world_axis_object_classes['spectral'][1] == ()
assert wcs.world_axis_object_classes['spectral'][2] == {}
###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################
HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitude (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n'))
def test_time_cube():
    # Time cube with two celestial axes and a UTC time axis (Rots et al. 2015, Table 5)
wcs = WCS_TIME_CUBE
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (11, 2048, 2048)
assert wcs.pixel_shape == (2048, 2048, 11)
assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time']
assert wcs.world_axis_units == ['deg', 'deg', 's']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['', '', '']
assert_equal(wcs.axis_correlation_matrix, [[True, True, False],
[True, True, False],
[False, False, True]])
components = wcs.world_axis_object_components
assert components[0] == ('celestial', 1, 'spherical.lat.degree')
assert components[1] == ('celestial', 0, 'spherical.lon.degree')
assert components[2][:2] == ('time', 0)
assert callable(components[2][2])
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['time'][0] is Time
assert wcs.world_axis_object_classes['time'][1] == ()
assert wcs.world_axis_object_classes['time'][2] == {}
assert callable(wcs.world_axis_object_classes['time'][3])
assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0),
(14.8289418840003, 2.01824372640628, 2375.341))
assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2),
(14.8289418840003, 2.01824372640628, 2375.341))
assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
(-449.2, 2955.6, 0))
assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
(0, 2956, -449))
# High-level API
coord, time = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
coord, time = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
x, y, z = wcs.world_to_pixel(coord, time)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
# Order of world coordinates shouldn't matter
x, y, z = wcs.world_to_pixel(time, coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
i, j, k = wcs.world_to_array_index(coord, time)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
i, j, k = wcs.world_to_array_index(time, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
###############################################################################
# The following tests are to make sure that Time objects are constructed
# correctly for a variety of combinations of WCS keywords
###############################################################################
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
if Version(wcsver) >= Version('7.1'):
HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n"
@pytest.fixture
def header_time_1d():
return Header.fromstring(HEADER_TIME_1D, sep='\n')
def assert_time_at(header, position, jd1, jd2, scale, format):
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(position)
assert_allclose(time.jd1, jd1, rtol=1e-10)
assert_allclose(time.jd2, jd2, rtol=1e-10)
assert time.format == format
assert time.scale == scale
@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local'))
def test_time_1d_values(header_time_1d, scale):
# Check that Time objects are instantiated with the correct values,
# scales, and formats.
header_time_1d['CTYPE1'] = scale.upper()
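    # MJDREF = 50002.6 is JD 2450003.1, i.e. jd1 = 2450003, jd2 = 0.1;
    # 0-based pixel 1 maps to CRVAL1 + CDELT1 * 1 = 7 s past that epoch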
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd')
def test_time_1d_values_gps(header_time_1d):
# Special treatment for GPS scale
header_time_1d['CTYPE1'] = 'GPS'
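    # TAI runs 19 s ahead of GPS, hence the extra 19 s in the expected value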
assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd')
def test_time_1d_values_deprecated(header_time_1d):
# Deprecated (in FITS) scales
header_time_1d['CTYPE1'] = 'TDT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')
header_time_1d['CTYPE1'] = 'IAT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')
header_time_1d['CTYPE1'] = 'GMT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
header_time_1d['CTYPE1'] = 'ET'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')
def test_time_1d_values_time(header_time_1d):
header_time_1d['CTYPE1'] = 'TIME'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
header_time_1d['TIMESYS'] = 'TAI'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')
@pytest.mark.remote_data
@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc'))
def test_time_1d_roundtrip(header_time_1d, scale):
# Check that coordinates round-trip
pixel_in = np.arange(3, 10)
header_time_1d['CTYPE1'] = scale.upper()
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
# Simple test
time = wcs.pixel_to_world(pixel_in)
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
# Test with an intermediate change to a different scale/format
time = wcs.pixel_to_world(pixel_in).tdb
time.format = 'isot'
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
def test_time_1d_high_precision(header_time_1d):
# Case where the MJDREF is split into two for high precision
del header_time_1d['MJDREF']
header_time_1d['MJDREFI'] = 52000.
header_time_1d['MJDREFF'] = 1e-11
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
# Here we have to use a very small rtol to really test that MJDREFF is
# taken into account
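    # (MJDREFI = 52000 -> JD 2452000.5, i.e. jd1 = 2452001.0, jd2 = -0.5;
    #  pixel 10 maps to 5 + 2*10 = 25 s, plus the 1e-11 day MJDREFF part)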
assert_allclose(time.jd1, 2452001.0, rtol=1e-12)
assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)
def test_time_1d_location_geodetic(header_time_1d):
# Make sure that the location is correctly returned (geodetic case)
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
lon, lat, alt = time.location.to_geodetic()
# FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976
# ellipsoid (https://github.com/astropy/astropy/issues/9420)
assert_allclose(lon.degree, -20)
assert_allclose(lat.degree, -70)
# assert_allclose(alt.to_value(u.m), 2530.)
@pytest.fixture
def header_time_1d_no_obs():
header = Header.fromstring(HEADER_TIME_1D, sep='\n')
del header['OBSGEO-L']
del header['OBSGEO-B']
del header['OBSGEO-H']
return header
def test_time_1d_location_geocentric(header_time_1d_no_obs):
# Make sure that the location is correctly returned (geocentric case)
header = header_time_1d_no_obs
header['OBSGEO-X'] = 10
header['OBSGEO-Y'] = -20
header['OBSGEO-Z'] = 30
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 10)
assert_allclose(y.to_value(u.m), -20)
assert_allclose(z.to_value(u.m), 30)
def test_time_1d_location_geocenter(header_time_1d_no_obs):
header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER'
wcs = WCS(header_time_1d_no_obs)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 0)
assert_allclose(y.to_value(u.m), 0)
assert_allclose(z.to_value(u.m), 0)
def test_time_1d_location_missing(header_time_1d_no_obs):
# Check what happens when no location is present
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match='Missing or incomplete observer location '
'information, setting location in Time to None'):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_incomplete(header_time_1d_no_obs):
# Check what happens when location information is incomplete
header_time_1d_no_obs['OBSGEO-L'] = 10.
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match='Missing or incomplete observer location '
'information, setting location in Time to None'):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_unsupported(header_time_1d_no_obs):
# Check what happens when TREFPOS is unsupported
header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER'
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match="Observation location 'barycenter' is not "
"supported, setting location in Time to None"):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_unsupported_ctype(header_time_1d_no_obs):
    # For scales that we don't support yet, e.g. UT(...), fall back to Time
    # and drop the unsupported sub-scale
header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)'
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match="Dropping unsupported sub-scale WWV from scale UT"):
time = wcs.pixel_to_world(10)
assert isinstance(time, Time)
###############################################################################
# Extra corner cases
###############################################################################
def test_unrecognized_unit():
# TODO: Determine whether the following behavior is desirable
wcs = WCS(naxis=1)
with pytest.warns(UnitsWarning):
wcs.wcs.cunit = ['bananas // sekonds']
assert wcs.world_axis_units == ['bananas // sekonds']
def test_distortion_correlations():
filename = get_pkg_data_filename('../../tests/data/sip.fits')
with pytest.warns(FITSFixedWarning):
w = WCS(filename)
assert_equal(w.axis_correlation_matrix, True)
# Changing PC to an identity matrix doesn't change anything since
# distortions are still present.
w.wcs.pc = [[1, 0], [0, 1]]
assert_equal(w.axis_correlation_matrix, True)
# Nor does changing the name of the axes to make them non-celestial
w.wcs.ctype = ['X', 'Y']
assert_equal(w.axis_correlation_matrix, True)
# However once we turn off the distortions the matrix changes
w.sip = None
assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])
# If we go back to celestial coordinates then the matrix is all True again
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_equal(w.axis_correlation_matrix, True)
# Or if we change to X/Y but have a non-identity PC
w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
w.wcs.ctype = ['X', 'Y']
assert_equal(w.axis_correlation_matrix, True)
def test_custom_ctype_to_ucd_mappings():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ['SPAM']
assert wcs.world_axis_physical_types == [None]
# Check simple behavior
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
assert wcs.world_axis_physical_types == [None]
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
# Check nesting
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
assert wcs.world_axis_physical_types == ['food.spam']
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
# Check priority in nesting
with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
assert wcs.world_axis_physical_types == ['notfood']
def test_caching_components_and_classes():
# Make sure that when we change the WCS object, the classes and components
# are updated (we use a cache internally, so we need to make sure the cache
# is invalidated if needed)
wcs = WCS_SIMPLE_CELESTIAL.deepcopy()
assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
wcs.wcs.radesys = 'FK5'
frame = wcs.world_axis_object_classes['celestial'][2]['frame']
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2000.
wcs.wcs.equinox = 2010
frame = wcs.world_axis_object_classes['celestial'][2]['frame']
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2010.
def test_sub_wcsapi_attributes():
# Regression test for a bug that caused some of the WCS attributes to be
# incorrect when using WCS.sub or WCS.celestial (which is an alias for sub
# with lon/lat types).
wcs = WCS_SPECTRAL_CUBE.deepcopy()
wcs.pixel_shape = (30, 40, 50)
wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
# Use celestial shortcut
wcs_sub1 = wcs.celestial
assert wcs_sub1.pixel_n_dim == 2
assert wcs_sub1.world_n_dim == 2
assert wcs_sub1.array_shape == (50, 30)
assert wcs_sub1.pixel_shape == (30, 50)
assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]
assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon']
assert wcs_sub1.world_axis_units == ['deg', 'deg']
assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude']
# Try adding axes
wcs_sub2 = wcs.sub([0, 2, 0])
assert wcs_sub2.pixel_n_dim == 3
assert wcs_sub2.world_n_dim == 3
assert wcs_sub2.array_shape == (None, 40, None)
assert wcs_sub2.pixel_shape == (None, 40, None)
assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]
assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None]
assert wcs_sub2.world_axis_units == ['', 'Hz', '']
assert wcs_sub2.world_axis_names == ['', 'Frequency', '']
# Use strings
wcs_sub3 = wcs.sub(['longitude', 'latitude'])
assert wcs_sub3.pixel_n_dim == 2
assert wcs_sub3.world_n_dim == 2
assert wcs_sub3.array_shape == (30, 50)
assert wcs_sub3.pixel_shape == (50, 30)
assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
assert wcs_sub3.world_axis_units == ['deg', 'deg']
assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude']
# Now try without CNAME set
wcs.wcs.cname = [''] * wcs.wcs.naxis
wcs_sub4 = wcs.sub(['longitude', 'latitude'])
assert wcs_sub4.pixel_n_dim == 2
assert wcs_sub4.world_n_dim == 2
assert wcs_sub4.array_shape == (30, 50)
assert wcs_sub4.pixel_shape == (50, 30)
assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
assert wcs_sub4.world_axis_units == ['deg', 'deg']
assert wcs_sub4.world_axis_names == ['', '']
HEADER_POLARIZED = """
CTYPE1 = 'HPLT-TAN'
CTYPE2 = 'HPLN-TAN'
CTYPE3 = 'STOKES'
"""
@pytest.fixture
def header_polarized():
return Header.fromstring(HEADER_POLARIZED, sep='\n')
def test_phys_type_polarization(header_polarized):
w = WCS(header_polarized)
assert w.world_axis_physical_types[2] == 'phys.polarization.stokes'
###############################################################################
# Spectral transformations
###############################################################################
HEADER_SPECTRAL_FRAMES = """
BUNIT = 'Jy/beam'
EQUINOX = 2.000000000E+03
CTYPE1 = 'RA---SIN'
CRVAL1 = 2.60108333333E+02
CDELT1 = -2.777777845E-04
CRPIX1 = 1.0
CUNIT1 = 'deg'
CTYPE2 = 'DEC--SIN'
CRVAL2 = -9.75000000000E-01
CDELT2 = 2.777777845E-04
CRPIX2 = 1.0
CUNIT2 = 'deg'
CTYPE3 = 'FREQ'
CRVAL3 = 1.37835117405E+09
CDELT3 = 9.765625000E+04
CRPIX3 = 32.0
CUNIT3 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_frames():
return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n')
def test_spectralcoord_frame(header_spectral_frames):
# This is a test to check the numerical results of transformations between
# different velocity frames. We simply make sure that the returned
# SpectralCoords are in the right frame but don't check the transformations
# since this is already done in test_spectralcoord_accuracy
# in astropy.coordinates.
with iers.conf.set_temp('auto_download', False):
obstime = Time(f"2009-05-04T04:44:23", scale='utc')
header = header_spectral_frames.copy()
header['MJD-OBS'] = obstime.mjd
header['CRVAL1'] = 16.33211
header['CRVAL2'] = -34.2221
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
# We start off with a WCS defined in topocentric frequency
with pytest.warns(FITSFixedWarning):
wcs_topo = WCS(header)
# We convert a single pixel coordinate to world coordinates and keep only
# the second high level object - a SpectralCoord:
sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]
# We check that this is in topocentric frame with zero velocities
assert isinstance(sc_topo, SpectralCoord)
assert isinstance(sc_topo.observer, ITRS)
assert sc_topo.observer.obstime.isot == obstime.isot
assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0)
observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS())
assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km
for specsys, expected_frame in VELOCITY_FRAMES.items():
header['SPECSYS'] = specsys
with pytest.warns(FITSFixedWarning):
wcs = WCS(header)
sc = wcs.pixel_to_world(0, 0, 31)[1]
# Now transform to the expected velocity frame, which should leave
# the spectral coordinate unchanged
sc_check = sc.with_observer_stationary_relative_to(expected_frame)
assert_quantity_allclose(sc.quantity, sc_check.quantity)
@pytest.mark.parametrize(('ctype3', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))
def test_different_ctypes(header_spectral_frames, ctype3, observer):
header = header_spectral_frames.copy()
header['CTYPE3'] = ctype3
header['CRVAL3'] = 0.1
header['CDELT3'] = 0.001
if ctype3[0] == 'V':
header['CUNIT3'] = 'm s-1'
else:
header['CUNIT3'] = ''
header['RESTWAV'] = 1.420405752E+09
header['MJD-OBS'] = 55197
if observer:
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
header['SPECSYS'] = 'BARYCENT'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)
assert isinstance(spectralcoord, SpectralCoord)
if observer:
pix = wcs.world_to_pixel(skycoord, spectralcoord)
else:
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
pix = wcs.world_to_pixel(skycoord, spectralcoord)
assert_allclose(pix, [0, 0, 31], rtol=1e-6)
def test_non_convergence_warning():
"""Test case for issue #11446
Since we can't define a target accuracy when plotting a WCS `all_world2pix`
should not error but only warn when the default accuracy can't be reached.
"""
# define a minimal WCS where convergence fails for certain image positions
wcs = WCS(naxis=2)
crpix = [0, 0]
a = b = ap = bp = np.zeros((4, 4))
a[3, 0] = -1.20116753e-07
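    # note that a, b, ap and bp all alias the same ndarray here, so this
    # cubic term appears in every SIP coefficient table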
test_pos_x = [1000, 1]
test_pos_y = [0, 2]
wcs.sip = Sip(a, b, ap, bp, crpix)
# first make sure the WCS works when using a low accuracy
expected = wcs.all_world2pix(test_pos_x, test_pos_y, 0, tolerance=1e-3)
# then check that it fails when using the default accuracy
with pytest.raises(NoConvergence):
wcs.all_world2pix(test_pos_x, test_pos_y, 0)
    # finally, check that world_to_pixel_values raises a warning but returns
    # the same 'low accuracy' result
with pytest.warns(UserWarning):
assert_allclose(wcs.world_to_pixel_values(test_pos_x, test_pos_y),
expected)
HEADER_SPECTRAL_1D = """
CTYPE1 = 'FREQ'
CRVAL1 = 1.37835117405E+09
CDELT1 = 9.765625000E+04
CRPIX1 = 32.0
CUNIT1 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_1d():
return Header.fromstring(HEADER_SPECTRAL_1D, sep='\n')
@pytest.mark.parametrize(('ctype1', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))
def test_spectral_1d(header_spectral_1d, ctype1, observer):
# This is a regression test for issues that happened with 1-d WCS
# where the target is not defined but observer is.
header = header_spectral_1d.copy()
header['CTYPE1'] = ctype1
header['CRVAL1'] = 0.1
header['CDELT1'] = 0.001
if ctype1[0] == 'V':
header['CUNIT1'] = 'm s-1'
else:
header['CUNIT1'] = ''
header['RESTWAV'] = 1.420405752E+09
header['MJD-OBS'] = 55197
if observer:
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
header['SPECSYS'] = 'BARYCENT'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
# First ensure that transformations round-trip
spectralcoord = wcs.pixel_to_world(31)
assert isinstance(spectralcoord, SpectralCoord)
assert spectralcoord.target is None
assert (spectralcoord.observer is not None) is observer
if observer:
expected_message = 'No target defined on SpectralCoord'
else:
expected_message = 'No observer defined on WCS'
with pytest.warns(AstropyUserWarning, match=expected_message):
pix = wcs.world_to_pixel(spectralcoord)
assert_allclose(pix, [31], rtol=1e-6)
# Also make sure that we can convert a SpectralCoord on which the observer
# is not defined but the target is.
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spectralcoord_no_obs = SpectralCoord(spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc))
if observer:
expected_message = 'No observer defined on SpectralCoord'
else:
expected_message = 'No observer defined on WCS'
with pytest.warns(AstropyUserWarning, match=expected_message):
pix2 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix2, [31], rtol=1e-6)
# And finally check case when both observer and target are defined on the
# SpectralCoord
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spectralcoord_no_obs = SpectralCoord(spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
observer=ICRS(10 * u.deg, 20 * u.deg, distance=0 * u.kpc),
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc))
if observer:
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
else:
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix3, [31], rtol=1e-6)
|
6fdd08781364f29ee8cbbe3b8d40720a19b06f866ef451ba2f95c721c4e2c5b8 | """
Helper functions for Time.
"""
from . import function_helpers
|
938ab58c7e777f9ddd7f591e89ed6fc3c3c1ddcbcda95855f41b17516dfc300e | """
Helpers for overriding numpy functions in
`~astropy.time.Time.__array_function__`.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import FunctionAssigner
# TODO: Fill this in with functions that don't make sense for times
UNSUPPORTED_FUNCTIONS = {}
# Functions that return the final result of the numpy function
CUSTOM_FUNCTIONS = {}
custom_functions = FunctionAssigner(CUSTOM_FUNCTIONS)
@custom_functions(helps={np.linspace})
def linspace(tstart, tstop, *args, **kwargs):
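    """Implement `np.linspace` for `Time` endpoints: fractional offsets in
    [0, 1] are generated with the regular `np.linspace` and scaled by the
    `TimeDelta` ``tstop - tstart``; e.g. ``np.linspace(Time('2020-01-01'),
    Time('2020-01-02'), 5)`` gives five Times spaced 6 hours apart
    (illustrative example).
    """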
from astropy.time import Time
if isinstance(tstart, Time):
if not isinstance(tstop, Time):
return NotImplemented
if kwargs.get('retstep'):
offsets, step = np.linspace(np.zeros(tstart.shape), np.ones(tstop.shape), *args, **kwargs)
tdelta = tstop - tstart
return tstart + tdelta * offsets, tdelta * step
else:
offsets = np.linspace(np.zeros(tstart.shape), np.ones(tstop.shape), *args, **kwargs)
return tstart + (tstop - tstart) * offsets
|
172964082434e22ec7ba942e7e380094a837d7090f8ecaafbd08094d48bc4918 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import copy
import functools
import datetime
from copy import deepcopy
from decimal import Decimal, localcontext
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_allclose
import erfa
from erfa import ErfaWarning
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils import isiterable, iers
from astropy.time import (Time, TimeDelta, ScaleValueError, STANDARD_TIME_SCALES,
TimeString, TimezoneInfo, TIME_FORMATS)
from astropy.coordinates import EarthLocation
from astropy import units as u
from astropy.table import Column, Table
from astropy.utils.compat.optional_deps import HAS_PYTZ, HAS_H5PY # noqa
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps * 24 * 3600)
allclose_year = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=0.) # 14 microsec at current epoch
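# With eps ~ 2.2e-16: eps in days is ~19 ps (the jd2 atol above),
# eps * 86400 s is likewise ~19 ps for seconds, and eps relative to ~2000 yr
# comes to ~14 microseconds at the current epoch, matching the inline comments.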
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5 + 1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5 + 0.00037179926839122024,
-0.5 + 0.00039351851851851852]))
        # Get a new ``Time`` object which is referenced to the TT scale
        # (internal JD1 and JD2 are now with respect to the TT scale)
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
        # Get the representation of the ``Time`` object in a particular format
        # (in this case seconds since 1998.0). This returns either a scalar or
        # array, depending on whether the input was a scalar or array.
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.) / 10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('format_', Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == 'tai'
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
        # check it is a view
        # (via ndarray, since the Quantity setter is problematic for
        # structured arrays)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=7, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.5000000'
assert t.ut1.iso == '2006-01-15 21:24:37.8341000'
assert t.tai.iso == '2006-01-15 21:25:10.5000000'
assert t.tt.iso == '2006-01-15 21:25:42.6840000'
assert t.tcg.iso == '2006-01-15 21:25:43.3226905'
assert t.tdb.iso == '2006-01-15 21:25:42.6843728'
assert t.tcb.iso == '2006-01-15 21:25:56.8939523'
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.5000000'
assert t.ut1.iso == '2006-01-15 21:24:37.8341000'
assert t.tai.iso == '2006-01-15 21:25:10.5000000'
assert t.tt.iso == '2006-01-15 21:25:42.6840000'
assert t.tcg.iso == '2006-01-15 21:25:43.3226905'
assert t.tdb.iso == '2006-01-15 21:25:42.6843725'
assert t.tcb.iso == '2006-01-15 21:25:56.8939519'
# Check we get the same result
t2 = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
location=(0*u.m, 0*u.m, 0*u.m))
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'] * 2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5'] * 2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5'] * 3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales."""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp('auto_download', False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
dt64 = np.datetime64('2012-06-18T02:00:05.453000000')
Time(dt64, format='datetime64', scale='tai')
Time([dt64, dt64], format='datetime64', scale='tai')
def test_local_format_transforms(self):
"""
Test transformation of local time to different formats.
Transformation to formats with a reference epoch should raise
ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600. / 24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600. / 24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500'
assert_allclose(t.byear, 2006.04217888831, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
# epochTimeFormats
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2-dt) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
dt64_2 = np.datetime64('2000-01-02')
t = Time(dt64, scale='utc', precision=9, format='datetime64')
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64
t = Time(dt64_2, scale='utc', precision=3, format='datetime64')
assert t.iso == '2000-01-02 00:00:00.000'
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale='utc', format='datetime64')
assert np.all(t.value == [dt64, dt64_2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime64 == np.datetime64('2000-01-01T01:01:01.123456789')
# broadcasting
dt3 = (dt64 + (dt64_2-dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc', format='datetime64')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format='datetime64')
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format='datetime64'))
assert Time(t3[2, 0], format='datetime64') == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 and 2016-12-31 leap seconds for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f'{year:04d}-{month:02d}'
yyyy_mm_dd = f'{year:04d}-{month:02d}-{day:02d}'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = f'{year:04d}-07-01'
else:
yyyy_mm_dd_plus1 = f'{year + 1:04d}-01-01'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale='local')
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize("d", [
dict(val="2001:001", val2="ignored", scale="utc"),
dict(val={'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
val2="ignored", scale="utc"),
dict(val=np.datetime64('2005-02-25'), val2="ignored", scale="utc"),
dict(val=datetime.datetime(2000, 1, 2, 12, 0, 0),
val2="ignored", scale="utc"),
])
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
def test_broadcast_not_writable(self):
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = (2458000 + np.arange(3))
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
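# Hedged sketch of why val2 exists at all: splitting a value into two doubles
# preserves precision that a single float would round away (assumption: Time
# resolves differences well below one float64 ulp of the MJD, i.e. ~86 ns here).
def test_val2_extra_precision_sketch():
    t_two = Time(58000, 1e-12, format='mjd', scale='tai')
    t_one = Time(58000 + 1e-12, format='mjd', scale='tai')  # 1e-12 is lost here
    assert t_two != t_one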
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000',
'2000-01-01T01:01:01.000',
'2000-01-01T01:01:01.123']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+02000-01-01T01:01:01.123']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'-00594-01-01T00:00:00.000']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+10594-01-01T00:00:00.000']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc")):
with pytest.warns(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][:inputs[0].index("(")], format="isot",
scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:00.123456789(UTC)')
t = t.tai
assert t.isot == '1999-01-01T00:00:32.123'
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)')
t = t.utc
assert t.isot == '1999-01-01T00:00:00.123'
# Check scale consistency
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="tai")
assert t.scale == "tai"
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(ET)', scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'tt'
t = Time('J2000')
assert t.scale == 'tt'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
# Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
def test_plot_date(self):
"""Test the plot_date format.
Depending on the situation with matplotlib, this can give different
results because the plot date epoch time changed in matplotlib 3.3. This
test tries to use the matplotlib date2num function to make the test
independent of version, but if matplotlib isn't available then the code
(and test) use the pre-3.3 epoch.
"""
try:
from matplotlib.dates import date2num
except ImportError:
# No matplotlib, in which case this uses the epoch 0000-12-31
# as per matplotlib < 3.3.
# Value from:
# matplotlib.dates.set_epoch('0000-12-31')
# val = matplotlib.dates.date2num('2000-01-01')
val = 730120.0
else:
val = date2num(datetime.datetime(2000, 1, 1))
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time('54321.000000000001', format='mjd')
assert t == Time(54321, 1e-12, format='mjd')
assert t.mjd == 54321. # Lost precision!
assert t.value == 54321. # Lost precision!
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', 'bytes') == b'54321.000000000001'
expected_long = np.longdouble(54321.) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(t.to_value('mjd', subfmt='long'),
expected_long, rtol=0, atol=np.finfo(float).eps)
t.out_subfmt = 'str'
assert t.value == '54321.000000000001'
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.mjd == '54321.000000000001'
assert t.to_value('mjd', subfmt='bytes') == b'54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
t.out_subfmt = 'long'
assert np.allclose(t.value, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.to_value('mjd', subfmt=None), expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.mjd, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format='mjd')
expected = Time(i, f, format='mjd')
assert abs(t - expected) <= 20. * u.ps
t_float = Time(i + f, format='mjd')
assert t_float == Time(i, format='mjd')
assert t_float != t
assert t.value == 54321. # Lost precision!
assert np.allclose(t.to_value('mjd', subfmt='long'), mjd_long,
rtol=0., atol=np.finfo(float).eps)
t2 = Time(mjd_long, format='mjd', out_subfmt='long')
assert np.allclose(t2.value, mjd_long,
rtol=0., atol=np.finfo(float).eps)
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
t1 = Time(i, f, format='mjd')
t2 = Time(np.longdouble(i), f, format='mjd')
t3 = Time(i, np.longdouble(f), format='mjd')
t4 = Time(np.longdouble(i), np.longdouble(f), format='mjd')
assert t1 == t2 == t3 == t4
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1. if fmt == 'mjd' else 24. * 3600.)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt='long')
assert np.allclose(tm_long2, t_fmt_long2, rtol=0., atol=atol)
def test_subformat_input(self):
s = '54321.01234567890123456789'
i, f = s.split('.') # Note, OK only for fraction < 0.5
t = Time(float(i), float('.' + f), format='mjd')
t_str = Time(s, format='mjd')
t_bytes = Time(s.encode('ascii'), format='mjd')
t_decimal = Time(Decimal(s), format='mjd')
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize('out_subfmt', ('str', 'bytes'))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0., 1e-9, 1e-12])
t = Time(i, f, format='mjd', out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(['54321.0',
'54321.000000001',
'54321.000000000001'], dtype=out_subfmt)
assert np.all(t_value == expected)
assert np.all(Time(expected, format='mjd') == t)
# Explicit sub-format.
t = Time(i, f, format='mjd')
t_mjd_subfmt = t.to_value('mjd', subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize('fmt,string,val1,val2', [
('jd', '2451544.5333981', 2451544.5, .0333981),
('decimalyear', '2000.54321', 2000., .54321),
('cxcsec', '100.0123456', 100.0123456, None),
('unix', '100.0123456', 100.0123456, None),
('gps', '100.0123456', 100.0123456, None),
('byear', '1950.1', 1950.1, None),
('jyear', '2000.1', 2000.1, None)])
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt='str') == string
def test_basic_subformat_setting(self):
t = Time('2001', format='jyear', scale='tai')
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time('2001', format='jyear', scale='tai')
t.to_value('mjd', subfmt='str')
assert ('mjd', 'str') in t.cache['format']
t.to_value('mjd', 'str')
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time('2001', format='jyear', scale='tai')
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time('2001', format='jyear', scale='tai')
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
t2_s_40 = t.to_value(fmt, "str")
assert t_s_2 == t2_s_40, "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
t = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value('mjd', subfmt='decimal')
t2 = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value('mjd', subfmt='decimal')
t2_s_40 = t2.to_value('mjd', subfmt='decimal')
assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize("f, s, t", [("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str)])
def test_timedelta_basic(self, f, s, t):
dt = (Time("58000", format="mjd", scale="tai")
- Time("58001", format="mjd", scale="tai"))
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time('J2000')
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match='format must be one of'):
t.to_value('julian')
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match='not among selected'):
Time("58000", format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(np.longdouble(58000), format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='str')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='long')
def test_wrong_subfmt(self):
t = Time(58000., format='mjd')
with pytest.raises(ValueError, match='must match one'):
t.to_value('mjd', subfmt='parrot')
with pytest.raises(ValueError, match='must match one'):
t.out_subfmt = 'parrot'
with pytest.raises(ValueError, match='must match one'):
t.in_subfmt = 'parrot'
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time('J2000')
match = 'subformat not allowed for format jyear_str'
with pytest.raises(ValueError, match=match):
t.to_value('jyear_str', subfmt='parrot')
with pytest.raises(ValueError, match=match):
t.out_subfmt = 'parrot'
with pytest.raises(ValueError, match=match):
Time('J2000', out_subfmt='parrot')
with pytest.raises(ValueError, match=match):
t.in_subfmt = 'parrot'
with pytest.raises(ValueError, match=match):
Time('J2000', format='jyear_str', in_subfmt='parrot')
def test_switch_to_format_with_no_out_subfmt(self):
t = Time('2001-01-01', out_subfmt='date_hm')
assert t.out_subfmt == 'date_hm'
# Now do an in-place switch to format 'jyear_str' that has no subfmts
# where out_subfmt is changed to '*'.
t.format = 'jyear_str'
assert t.out_subfmt == '*'
assert t.value == 'J2001.001'
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r'bad day \(JD computed\)') as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
assert t.location.x == t_loc_x # prove that it did not change
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time('2320-01-01', scale='tai').stardate)[:7] == '1368.99'
assert str(Time('2330-01-01', scale='tai').stardate)[:8] == '10552.76'
assert str(Time('2340-01-01', scale='tai').stardate)[:8] == '19734.02'
@pytest.mark.parametrize('dates',
[(10000, '2329-05-26 03:02'),
(20000, '2340-04-15 19:05'),
(30000, '2351-03-07 11:08')])
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format='stardate')
t_iso = Time(t_star, format='iso', out_subfmt='date_hm')
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
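# Minimal round-trip sketch (hedged: relies only on decimalyear being linear
# between successive Jan 1 epochs, as exercised above; millisecond ISO output
# is far below the isclose tolerance).
def test_decimalyear_roundtrip_sketch():
    t = Time(2000.25, format='decimalyear')
    assert np.isclose(Time(t.iso, scale=t.scale).decimalyear, 2000.25)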
def test_fits_year0():
t = Time(1721425.5, format='jd', scale='tai')
assert t.fits == '0001-01-01T00:00:00.000'
t = Time(1721425.5 - 366., format='jd', scale='tai')
assert t.fits == '+00000-01-01T00:00:00.000'
t = Time(1721425.5 - 366. - 365., format='jd', scale='tai')
assert t.fits == '-00001-01-01T00:00:00.000'
def test_fits_year10000():
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000'
t = Time(5373484.5, -1. / 24. / 3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_time_from_epoch_jds():
"""Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
jd1 is an integral value and abs(jd2) <= 0.5.
"""
# From 1999:001 00:00 to 1999:002 12:00 by a non-round step. This will
# catch jd2 == 0 and a case of abs(jd2) == 0.5.
cxcsecs = np.linspace(0, 86400 * 1.5, 49)
for cxcsec in cxcsecs:
t = Time(cxcsec, format='cxcsec')
assert np.round(t.jd1) == t.jd1
assert np.abs(t.jd2) <= 0.5
t = Time(cxcsecs, format='cxcsec')
assert np.all(np.round(t.jd1) == t.jd1)
assert np.all(np.abs(t.jd2) <= 0.5)
assert np.any(np.abs(t.jd2) == 0.5) # At least one exactly 0.5
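# Two-double sketch of the convention verified above (illustrative only):
# for 'jd' input the pair is normalized so jd1 holds the rounded day and jd2
# the small remainder, and their float sum reproduces t.jd exactly here.
def test_jd1_jd2_sum_sketch():
    t = Time(2451545.0, 0.25, format='jd', scale='tai')
    assert t.jd1 == 2451545.0 and t.jd2 == 0.25
    assert t.jd1 + t.jd2 == t.jd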
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
# Check that if we have internet, and downloading is allowed, we
# can get conversion to UT1 for the present, since we will download
# IERS_A in IERS_Auto.
monkeypatch.setattr('astropy.utils.iers.conf.auto_download', True)
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
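# Equivalent sketch with the stdlib fixed-offset timezone (assumption: any
# aware datetime is converted to UTC on input, just like the custom tzinfo).
def test_datetime_stdlib_timezone_sketch():
    tz = datetime.timezone(datetime.timedelta(hours=-6))
    d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=tz)
    assert Time(d).value == datetime.datetime(2002, 1, 2, 16, 3, 4)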
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
t = Time('2020-01-01', scale='utc')
assert allclose_sec(t.unix_tai - t.unix, 37.0)
t = Time('1970-01-01', scale='utc')
assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
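# Hedged sketch of the relation above: unix_tai - unix tracks TAI-UTC, which
# only grows as leap seconds accumulate (exact values depend on the current
# leap-second table, so only monotonicity is asserted here).
def test_unix_tai_offset_growth_sketch():
    t_old = Time('1990-01-01', scale='utc')
    t_new = Time('2020-01-01', scale='utc')
    assert (t_new.unix_tai - t_new.unix) > (t_old.unix_tai - t_old.unix)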
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname='US/Hawaii')
# The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError, match=r'does not support leap seconds'):
Time('2015-06-30 23:59:60.000').to_datetime()
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
import pytz
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
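# Identity sketch for the cache contract above (assumption: a cached scale
# transform returns the stored object rather than recomputing it).
def test_cache_identity_sketch():
    t = Time('2010-09-03 00:00:00')
    assert t.tai is t.tai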
def test_epoch_date_jd_is_day_fraction():
"""
Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[[f'{y:04d}-{m:02d}-{d:02d}' for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time('2000:001', scale='utc')
t[()] = '2000:002'
assert t.value.startswith('2000:002')
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err.value)
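# Follow-on sketch: copying a read-only cached transform makes it writable
# again, which is what the "Make a copy()" message above recommends.
def test_writeable_after_copy_sketch():
    t = Time(['2000:001', '2000:002'], scale='utc')
    t2 = t.tt.copy()
    t2[0] = t2[1]  # no ValueError: the copy owns its own (writable) data
    assert t2[0] == t2[1]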
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location=None'.format(loc[0])) in str(err.value)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location={}'.format(loc[0], loc[1])) in str(err.value)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location={}'.format(loc[1])) in str(err.value)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strftime_array():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S').tolist() == tstrings
def test_strftime_array_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime('%Y-%m-%d %H:%M:%S') == tstrings)
assert t.strftime('%Y-%m-%d %H:%M:%S').shape == tstrings.shape
def test_strftime_leapsecond():
time_string = '1995-12-31 23:59:60'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strptime_scalar():
"""Test of Time.strptime
"""
time_string = '2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
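# Round-trip sketch (hedged: whole-second directives only, no %f):
# strptime should invert strftime for the same directive string.
def test_strptime_strftime_roundtrip_sketch():
    t = Time('2007-05-04 21:08:12')
    fmt = '%Y-%b-%d %H:%M:%S'
    assert Time.strptime(t.strftime(fmt), fmt) == t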
def test_strptime_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01', '1998-Jan-01 00:00:02'],
['1998-Jan-01 00:00:03', '1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, '%S')
def test_strptime_input_bytes_scalar():
time_string = b'2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [[b'1998-Jan-01 00:00:01', b'1998-Jan-01 00:00:02'],
[b'1998-Jan-01 00:00:03', b'1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time('1995-12-31T23:59:60', format='isot')
time_obj2 = Time.strptime('1995-Dec-31 23:59:60', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time('0995-12-31T00:00:00', format='isot', scale='tai')
time_obj2 = Time.strptime('0995-Dec-31 00:00:00', '%Y-%b-%d %H:%M:%S',
scale='tai')
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = '2007-May-04 21:08:12.123'
time_object = Time('2007-05-04 21:08:12.123')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S.%f')
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01.123', '1998-Jan-01 00:00:02.000001'],
['1998-Jan-01 00:00:03.000900', '1998-Jan-01 00:00:04.123456']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01.123', '1998-01-01 00:00:02.000001'],
['1998-01-01 00:00:03.000900', '1998-01-01 00:00:04.123456']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S.%f')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00.123'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == time_string
def test_strftime_scalar_fracsec_precision():
time_string = '2010-09-03 06:00:00.123123123'
t = Time(time_string)
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123'
t.precision = 9
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123123123'
def test_strftime_array_fracsec():
tstrings = ['2010-09-03 00:00:00.123000', '2005-09-03 06:00:00.000001',
'1995-12-31 23:59:60.000900']
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f').tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format='unix')
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, '1970-01-01 00:01:00')
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time('1970-01-01 00:01:00'))
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time('1970-01-01 00:01:00')])
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format='unix'))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format='unix'))
def test_insert_exceptions():
tm = Time(1, format='unix')
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert 'cannot insert into scalar' in str(err.value)
tm = Time([1, 2], format='unix')
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert 'axis must be 0' in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert 'obj arg must be an integer' in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert 'index -100 is out of bounds for axis 0 with size 2' in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
t = Time(dt64, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format='cxcsec', location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format='cxcsec', location=loc)
t2 = Time(1, format='cxcsec')
assert hash(t) != hash(t2)
t = Time('2000:180', scale='utc')
t2 = Time(t, scale='tai')
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format='sec')
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time('2000:001', format='not-a-format')
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time('200')
assert 'Input values did not match any of the formats where' in str(err.value)
with pytest.raises(ValueError) as err:
Time('200', format='iso')
assert ('Input values did not match the format class iso:' + os.linesep
+ 'ValueError: Time 200 does not match iso format') == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format='iso')
assert ('Input values did not match the format class iso:' + os.linesep
+ 'TypeError: Input values for iso class must be strings') == str(err.value)
def test_ymdhms_defaults():
t1 = Time({'year': 2001}, format='ymdhms')
assert t1 == Time('2001-01-01')
times_dict_ns = {
'year': [2001, 2002],
'month': [2, 3],
'day': [4, 5],
'hour': [6, 7],
'minute': [8, 9],
'second': [10, 11]
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ('year', 'month', 'day', 'hour', 'minute', 'second')
@pytest.mark.parametrize('tm_input', [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
@pytest.mark.parametrize('as_row', [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(['2001-02-04 06:08:10', '2002-03-05 07:09:11'])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {
'year': [[2001, 2002],
[2003, 2004]],
'month': [2, 3],
'day': 4
}
time_shape = Time(
[['2001-02-04', '2002-03-04'],
['2003-02-04', '2004-03-04']]
)
time = Time(times_dict_shape, format='ymdhms')
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
'year': 2016,
'month': 12,
'day': 31,
'hour': 23,
'minute': 59,
'second': 60.123456789}
tm = Time(time_dict, **kwargs)
assert tm == Time('2016-12-31T23:59:60.123456789')
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == 'second':
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match='input must be dict or table-like'):
Time(10, format='ymdhms')
match = "'wrong' not allowed as YMDHMS key name(s)"
    # NB: pytest.raises() treats ``match`` as a regular expression, and the
    # literal parentheses in "(s)" break the match, so we fall back to old
    # school ``match in str(err.value)``.
with pytest.raises(ValueError) as err:
Time({'year': 2019, 'wrong': 1}, format='ymdhms')
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({'year': 2019, 'minute': 1}, format='ymdhms')
def test_ymdhms_masked():
tm = Time({'year': [2000, 2001]}, format='ymdhms')
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time({'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
scale='utc')
    # NOTE: t.ymdhms comes back as a structured scalar (np.void), so attribute
    # access returns a NumPy integer (e.g. int32) rather than a Python int;
    # the comparison below still works.
assert t.ymdhms.year == 2015
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_ecsv(fmt):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t['a'].format == t2['a'].format
# Some loss of precision in the serialization
assert not np.all(t['a'] == t2['a'])
# But no loss in the format representation
assert np.all(t['a'].value == t2['a'].value)
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_fits(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = tmp_path / 'out.fits'
t.write(out, format='fits')
t2 = Table.read(out, format='fits', astropy_native=True)
# Currently the format is lost in FITS so set it back
t2['a'].format = fmt
# No loss of precision in the serialization or representation
assert np.all(t['a'] == t2['a'])
assert np.all(t['a'].value == t2['a'].value)
@pytest.mark.skipif(not HAS_H5PY, reason='Needs h5py')
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_hdf5(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = tmp_path / 'out.h5'
t.write(str(out), format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(str(out), format='hdf5', path='root')
assert t['a'].format == t2['a'].format
# No loss of precision in the serialization or representation
assert np.all(t['a'] == t2['a'])
assert np.all(t['a'].value == t2['a'].value)
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object (or at
# least, the format object is constructed with "from_jd=True"). In this case the
# normal input validation does not happen but the new input validation does,
# and can ensure that strange broadcasting anomalies can't happen.
# This form of construction uses from_jd=True.
def test_broadcasting_writeable():
t = Time('J2015') + np.linspace(-1, 1, 10) * u.day
t[2] = Time(58000, format="mjd")
def test_format_subformat_compatibility():
"""Test that changing format with out_subfmt defined is not a problem.
See #9812, #9810."""
t = Time('2019-12-20', out_subfmt='date_??')
assert t.mjd == 58837.0
assert t.yday == '2019:354:00:00' # Preserves out_subfmt
t2 = t.replicate(format='mjd')
assert t2.out_subfmt == '*' # Changes to default
t2 = t.copy(format='mjd')
assert t2.out_subfmt == '*'
t2 = Time(t, format='mjd')
assert t2.out_subfmt == '*'
t2 = t.copy(format='yday')
assert t2.out_subfmt == 'date_??'
assert t2.value == '2019:354:00:00'
t.format = 'yday'
assert t.value == '2019:354:00:00'
assert t.out_subfmt == 'date_??'
t = Time('2019-12-20', out_subfmt='date')
assert t.mjd == 58837.0
assert t.yday == '2019:354'
@pytest.mark.parametrize('fmt_name,fmt_class', TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
"""From a starting Time value, test that every valid combination of
to_value(format, subfmt) works. See #9812, #9361.
"""
t = Time('2000-01-01')
subfmts = list(subfmt[0] for subfmt in fmt_class.subfmts) + [None, '*']
for subfmt in subfmts:
t.to_value(fmt_name, subfmt)
@pytest.mark.parametrize('location', [None, (45, 45)])
def test_location_init(location):
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances.
"""
tm = Time('J2010', location=location)
# Init from a scalar Time
tm2 = Time(tm)
assert np.all(tm.location == tm2.location)
assert type(tm.location) is type(tm2.location) # noqa
# From a list of Times
tm2 = Time([tm, tm])
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa
# Effectively the same as a list of Times, but just to be sure that
    # Table mixin initialization is working as expected.
tm2 = Table([[tm, tm]])['col0']
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa
def test_location_init_fail():
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances. Make sure exception is correct.
"""
tm = Time('J2010', location=(45, 45))
tm2 = Time('J2010')
with pytest.raises(ValueError,
match='cannot concatenate times unless all locations'):
Time([tm, tm2])
def test_linspace():
"""Test `np.linspace` `__array_func__` implementation for scalar and arrays.
"""
t1 = Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00'])
t2 = Time(['2021-01-01 01:00:00', '2021-12-28 00:00:00'])
atol = 1 * u.ps
ts = np.linspace(t1[0], t2[0], 3)
assert ts[0].isclose(Time('2021-01-01 00:00:00'), atol=atol)
assert ts[1].isclose(Time('2021-01-01 00:30:00'), atol=atol)
assert ts[2].isclose(Time('2021-01-01 01:00:00'), atol=atol)
ts = np.linspace(t1, t2[0], 2, endpoint=False)
assert ts.shape == (2, 2)
assert all(ts[0].isclose(Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2021-01-01 00:30:00', '2021-01-01 12:30:00']), atol=atol*10))
ts = np.linspace(t1, t2, 7)
assert ts.shape == (7, 2)
assert all(ts[0].isclose(Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2021-01-01 00:10:00', '2021-03-03 00:00:00']), atol=atol*300))
assert all(ts[5].isclose(Time(['2021-01-01 00:50:00', '2021-10-29 00:00:00']), atol=atol*3000))
assert all(ts[6].isclose(Time(['2021-01-01 01:00:00', '2021-12-28 00:00:00']), atol=atol))
def test_linspace_steps():
"""Test `np.linspace` `retstep` option.
"""
t1 = Time(['2021-01-01 00:00:00', '2021-01-01 12:00:00'])
t2 = Time('2021-01-02 00:00:00')
atol = 1 * u.ps
ts, st = np.linspace(t1, t2, 7, retstep=True)
assert ts.shape == (7, 2)
assert st.shape == (2,)
assert all(ts[1].isclose(ts[0] + st, atol=atol))
assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))
assert all(st.isclose(TimeDelta([14400, 7200], format='sec'), atol=atol))
def test_linspace_fmts():
"""Test `np.linspace` `__array_func__` implementation for start/endpoints
from different formats/systems.
"""
t1 = Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00'])
t2 = Time(2458850, format='jd')
t3 = Time(1578009600, format='unix')
atol = 1 * u.ps
ts = np.linspace(t1, t2, 3)
assert ts.shape == (3, 2)
assert all(ts[0].isclose(Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2020-01-01 06:00:00', '2020-01-01 18:00:00']), atol=atol))
assert all(ts[2].isclose(Time(['2020-01-01 12:00:00', '2020-01-01 12:00:00']), atol=atol))
ts = np.linspace(t1, Time([t2, t3]), 3)
assert ts.shape == (3, 2)
assert all(ts[0].isclose(Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2020-01-01 06:00:00', '2020-01-02 12:00:00']), atol=atol))
assert all(ts[2].isclose(Time(['2020-01-01 12:00:00', '2020-01-03 00:00:00']), atol=atol))
|
bbd40d1b510a9ad04b3448eb0bbcce3fe53109c761db2338c659fa8f2374d7d4 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""Handles the CDS string format for units."""
import operator
import os
import re
from astropy.units.utils import is_effectively_unity
from astropy.utils import classproperty, parsing
from astropy.utils.misc import did_you_mean
from . import core, utils
from .base import Base
class CDS(Base):
"""
Support the `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical
Catalogues 2.0 <http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_
format, and the `complete set of supported units
<https://vizier.u-strasbg.fr/viz-bin/Unit>`_. This format is used
by VOTable up to version 1.2.
"""
_tokens = (
'PRODUCT',
'DIVISION',
'OPEN_PAREN',
'CLOSE_PAREN',
'OPEN_BRACKET',
'CLOSE_BRACKET',
'X',
'SIGN',
'UINT',
'UFLOAT',
'UNIT',
'DIMENSIONLESS'
)
@classproperty(lazy=True)
def _units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import cds
names = {}
for key, val in cds.__dict__.items():
if isinstance(val, u.UnitBase):
names[key] = val
return names
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_PRODUCT = r'\.'
t_DIVISION = r'/'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_OPEN_BRACKET = r'\['
t_CLOSE_BRACKET = r'\]'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?'
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
else:
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
def t_X(t): # multiplication for factor in front of unit
r'[x×]'
return t
def t_UNIT(t):
r'\%|°|\\h|((?!\d)\w)+'
t.value = cls._get_unit(t)
return t
def t_DIMENSIONLESS(t):
r'---|-'
# These are separate from t_UNIT since they cannot have a prefactor.
t.value = cls._get_unit(t)
return t
t_ignore = ''
# Error handling rule
def t_error(t):
raise ValueError(
f"Invalid character at col {t.lexpos}")
return parsing.lex(lextab='cds_lextab', package='astropy/units',
reflags=int(re.UNICODE))
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `Standards
for Astronomical Catalogues 2.0
<http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_, which is not
        terribly precise. The exact grammar here is based on the
YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
'''
main : factor combined_units
| combined_units
| DIMENSIONLESS
| OPEN_BRACKET combined_units CLOSE_BRACKET
| OPEN_BRACKET DIMENSIONLESS CLOSE_BRACKET
| factor
'''
from astropy.units import dex
from astropy.units.core import Unit
if len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = dex(p[2])
else:
p[0] = Unit(p[1])
def p_combined_units(p):
'''
combined_units : product_of_units
| division_of_units
'''
p[0] = p[1]
def p_product_of_units(p):
'''
product_of_units : unit_expression PRODUCT combined_units
| unit_expression
'''
if len(p) == 4:
p[0] = p[1] * p[3]
else:
p[0] = p[1]
def p_division_of_units(p):
'''
division_of_units : DIVISION unit_expression
| unit_expression DIVISION combined_units
'''
if len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1] / p[3]
def p_unit_expression(p):
'''
unit_expression : unit_with_power
| OPEN_PAREN combined_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_factor(p):
'''
factor : signed_float X UINT signed_int
| UINT X UINT signed_int
| UINT signed_int
| UINT
| signed_float
'''
if len(p) == 5:
if p[3] != 10:
raise ValueError(
"Only base ten exponents are allowed in CDS")
p[0] = p[1] * 10.0 ** p[4]
elif len(p) == 3:
if p[1] != 10:
raise ValueError(
"Only base ten exponents are allowed in CDS")
p[0] = 10.0 ** p[2]
elif len(p) == 2:
p[0] = p[1]
def p_unit_with_power(p):
'''
unit_with_power : UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] ** p[2]
def p_numeric_power(p):
'''
numeric_power : sign UINT
'''
p[0] = p[1] * p[2]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule='cds_parsetab', package='astropy/units')
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
registry = core.get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(
f"At col {t.lexpos}, {str(e)}")
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the CDS SAC "
"standard. {}".format(
unit, did_you_mean(
unit, cls._units)))
else:
raise ValueError()
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
        if not isinstance(s, str):
            s = s.decode('ascii')
        if ' ' in s:
            raise ValueError('CDS unit must not contain whitespace')
# This is a short circuit for the case where the string
# is just a single unit name
try:
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError(str(e))
else:
raise ValueError("Syntax error")
@staticmethod
def _get_unit_name(unit):
return unit.get_format_name('cds')
@classmethod
def _format_unit_list(cls, units):
out = []
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
out.append(f'{cls._get_unit_name(base)}{int(power)}')
return '.'.join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit == core.dimensionless_unscaled:
return '---'
elif is_effectively_unity(unit.scale*100.):
return '%'
if unit.scale == 1:
s = ''
else:
m, e = utils.split_mantissa_exponent(unit.scale)
parts = []
if m not in ('', '1'):
parts.append(m)
if e:
if not e.startswith('-'):
e = "+" + e
parts.append(f'10{e}')
s = 'x'.join(parts)
pairs = list(zip(unit.bases, unit.powers))
if len(pairs) > 0:
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
|
1ecf37e60c54c53e157bed2135bf4508f5f5c7f286afbde8c056e815a6ad25b4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A collection of different unit formats.
"""
# This is pretty atrocious, but it will prevent a circular import for those
# formatters that need access to the units.core module An entry for it should
# exist in sys.modules since astropy.units.core imports this module
import sys
core = sys.modules['astropy.units.core']
from .base import Base # noqa
from .cds import CDS # noqa
from .console import Console # noqa
from .fits import Fits # noqa
from .generic import Generic, Unscaled # noqa
from .latex import Latex, LatexInline # noqa
from .ogip import OGIP # noqa
from .unicode_format import Unicode # noqa
from .vounit import VOUnit # noqa
__all__ = [
'Base', 'Generic', 'CDS', 'Console', 'Fits', 'Latex', 'LatexInline',
'OGIP', 'Unicode', 'Unscaled', 'VOUnit', 'get_format']
def _known_formats():
inout = [name for name, cls in Base.registry.items()
if cls.parse.__func__ is not Base.parse.__func__]
out_only = [name for name, cls in Base.registry.items()
if cls.parse.__func__ is Base.parse.__func__]
return (f"Valid formatter names are: {inout} for input and output, "
f"and {out_only} for output only.")
def get_format(format=None):
"""
Get a formatter by name.
Parameters
----------
format : str or `astropy.units.format.Base` instance or subclass
The name of the format, or the format instance or subclass
itself.
Returns
-------
format : `astropy.units.format.Base` instance
The requested formatter.
"""
if format is None:
return Generic
if isinstance(format, type) and issubclass(format, Base):
return format
elif not (isinstance(format, str) or format is None):
raise TypeError(
f"Formatter must a subclass or instance of a subclass of {Base!r} "
f"or a string giving the name of the formatter. {_known_formats()}.")
format_lower = format.lower()
if format_lower in Base.registry:
return Base.registry[format_lower]
raise ValueError(f"Unknown format {format!r}. {_known_formats()}")
|
c91ea701086511eb32ce9a0e4a844d096e4c8c05821a1d1401453a4d9613b95a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
import copy
import keyword
import math
import os
import warnings
from fractions import Fraction
from astropy.utils import parsing
from . import core, generic, utils
class OGIP(generic.Generic):
"""
Support the units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
_tokens = (
'DIVISION',
'OPEN_PAREN',
'CLOSE_PAREN',
'WHITESPACE',
'STARSTAR',
'STAR',
'SIGN',
'UFLOAT',
'LIT10',
'UINT',
'UNKNOWN',
'UNIT'
)
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
bases = [
'A', 'C', 'cd', 'eV', 'F', 'g', 'H', 'Hz', 'J',
'Jy', 'K', 'lm', 'lx', 'm', 'mol', 'N', 'ohm', 'Pa',
'pc', 'rad', 's', 'S', 'sr', 'T', 'V', 'W', 'Wb'
]
deprecated_bases = []
prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
]
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
'angstrom', 'arcmin', 'arcsec', 'AU', 'barn', 'bin',
'byte', 'chan', 'count', 'day', 'deg', 'erg', 'G',
'h', 'lyr', 'mag', 'min', 'photon', 'pixel',
'voxel', 'yr'
]
for unit in simple_units:
names[unit] = getattr(u, unit)
# Create a separate, disconnected unit for the special case of
# Crab and mCrab, since OGIP doesn't define their quantities.
Crab = u.def_unit(['Crab'], prefixes=False, doc='Crab (X-ray flux)')
mCrab = u.Unit(10 ** -3 * Crab)
names['Crab'] = Crab
names['mCrab'] = mCrab
deprecated_units = ['Crab', 'mCrab']
for unit in deprecated_units:
deprecated_names.add(unit)
# Define the function names, so we can parse them, even though
# we can't use any of them (other than sqrt) meaningfully for
# now.
functions = [
'log', 'ln', 'exp', 'sqrt', 'sin', 'cos', 'tan', 'asin',
'acos', 'atan', 'sinh', 'cosh', 'tanh'
]
for name in functions:
names[name] = name
return names, deprecated_names, functions
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_DIVISION = r'/'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_WHITESPACE = '[ \t]+'
t_STARSTAR = r'\*\*'
t_STAR = r'\*'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'(((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+))|(((\d+\.\d*)|(\.\d+))([eE][+-]?\d+)?)'
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
        def t_LIT10(t):
            r'10'
            t.value = 10
            return t
def t_UNKNOWN(t):
r'[Uu][Nn][Kk][Nn][Oo][Ww][Nn]'
return None
def t_UNIT(t):
r'[a-zA-Z][a-zA-Z_]*'
t.value = cls._get_unit(t)
return t
# Don't ignore whitespace
t_ignore = ''
# Error handling rule
def t_error(t):
raise ValueError(
f"Invalid character at col {t.lexpos}")
return parsing.lex(lextab='ogip_lextab', package='astropy/units')
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the
`Specification of Physical Units within OGIP FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__,
        which is not terribly precise. The exact grammar here is
based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
'''
main : UNKNOWN
| complete_expression
| scale_factor complete_expression
| scale_factor WHITESPACE complete_expression
'''
if len(p) == 4:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_complete_expression(p):
'''
complete_expression : product_of_units
'''
p[0] = p[1]
def p_product_of_units(p):
'''
product_of_units : unit_expression
| division unit_expression
| product_of_units product unit_expression
| product_of_units division unit_expression
'''
if len(p) == 4:
if p[2] == 'DIVISION':
p[0] = p[1] / p[3]
else:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1]
def p_unit_expression(p):
'''
unit_expression : unit
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN
| OPEN_PAREN complete_expression CLOSE_PAREN
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
| OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
'''
            # Checking ``p[1] in cls._functions`` directly would compare the
            # unit against each string in the list, trying to parse each item
            # into a unit, which is slow. Since we know that all the items in
            # the list are strings, we can simply convert p[1] to a string
            # instead.
p1_str = str(p[1])
if p1_str in cls._functions and p1_str != 'sqrt':
raise ValueError(
"The function '{}' is valid in OGIP, but not understood "
"by astropy.units.".format(
p[1]))
if len(p) == 7:
if p1_str == 'sqrt':
p[0] = p[1] * p[3] ** (0.5 * p[6])
else:
p[0] = p[1] * p[3] ** p[6]
elif len(p) == 6:
p[0] = p[2] ** p[5]
elif len(p) == 5:
if p1_str == 'sqrt':
p[0] = p[3] ** 0.5
else:
p[0] = p[1] * p[3]
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_scale_factor(p):
'''
scale_factor : LIT10 power numeric_power
| LIT10
| signed_float
| signed_float power numeric_power
| signed_int power numeric_power
'''
if len(p) == 4:
p[0] = 10 ** p[3]
else:
p[0] = p[1]
            # Can't use np.log10 here, because p[0] may be an
            # arbitrary-precision Python int.
if math.log10(p[0]) % 1.0 != 0.0:
from astropy.units.core import UnitsWarning
warnings.warn(
"'{}' scale should be a power of 10 in "
"OGIP format".format(p[0]), UnitsWarning)
def p_division(p):
'''
division : DIVISION
| WHITESPACE DIVISION
| WHITESPACE DIVISION WHITESPACE
| DIVISION WHITESPACE
'''
p[0] = 'DIVISION'
def p_product(p):
'''
product : WHITESPACE
| STAR
| WHITESPACE STAR
| WHITESPACE STAR WHITESPACE
| STAR WHITESPACE
'''
p[0] = 'PRODUCT'
def p_power(p):
'''
power : STARSTAR
'''
p[0] = 'POWER'
def p_unit(p):
'''
unit : UNIT
| UNIT power numeric_power
'''
if len(p) == 4:
p[0] = p[1] ** p[3]
else:
p[0] = p[1]
def p_numeric_power(p):
'''
numeric_power : UINT
| signed_float
| OPEN_PAREN signed_int CLOSE_PAREN
| OPEN_PAREN signed_float CLOSE_PAREN
| OPEN_PAREN signed_float division UINT CLOSE_PAREN
'''
if len(p) == 6:
p[0] = Fraction(int(p[2]), int(p[4]))
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule='ogip_parsetab', package='astropy/units')
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the OGIP "
"standard. {}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)))
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'OGIP',
cls._to_decomposed_alternative)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
s = s.strip()
try:
# This is a short circuit for the case where the string is
# just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return core.Unit(
cls._parser.parse(s, lexer=cls._lexer, debug=debug))
except ValueError as e:
if str(e):
raise
else:
raise ValueError(
f"Syntax error parsing unit '{s}'")
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('ogip')
cls._validate_unit(name)
return name
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power:
out.append(f'{cls._get_unit_name(base)}**({power})')
else:
out.append(f'{cls._get_unit_name(base)}**{power}')
return ' '.join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
            # Can't use np.log10 here, because unit.scale may be an
            # arbitrary-precision Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
warnings.warn(
f"'{unit.scale}' scale should be a power of 10 in OGIP format",
core.UnitsWarning)
return generic._to_string(cls, unit)
@classmethod
def _to_decomposed_alternative(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
            # Can't use np.log10 here, because unit.scale may be an
            # arbitrary-precision Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{} (with data multiplied by {})'.format(
generic._to_string(cls, unit), scale)
        return generic._to_string(cls, unit)
|
a18136f41fa8716b3b1519d3635d8c1e21c9e530c39c89eb807ea8c2ebc18ff4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities shared by the different formats.
"""
import warnings
from fractions import Fraction
from astropy.utils.misc import did_you_mean
from ..utils import maybe_simple_fraction
def get_grouped_by_powers(bases, powers):
"""
Groups the powers and bases in the given
`~astropy.units.CompositeUnit` into positive powers and
negative powers for easy display on either side of a solidus.
Parameters
----------
bases : list of `astropy.units.UnitBase` instances
powers : list of int
Returns
-------
positives, negatives : tuple of lists
Each element in each list is tuple of the form (*base*,
*power*). The negatives have the sign of their power reversed
(i.e. the powers are all positive).
"""
positive = []
negative = []
for base, power in zip(bases, powers):
if power < 0:
negative.append((base, -power))
elif power > 0:
positive.append((base, power))
else:
raise ValueError("Unit with 0 power")
return positive, negative
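# Illustrative behaviour (a sketch):
#     >>> from astropy import units as u
#     >>> get_grouped_by_powers([u.m, u.s], [1, -2])
#     ([(Unit("m"), 1)], [(Unit("s"), 2)])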
def split_mantissa_exponent(v, format_spec=".8g"):
"""
Given a number, split it into its mantissa and base 10 exponent
parts, each as strings. If the exponent is too small, it may be
returned as the empty string.
Parameters
----------
v : float
format_spec : str, optional
Number representation formatting string
Returns
-------
mantissa, exponent : tuple of strings
"""
x = format(v, format_spec).split('e')
if x[0] != '1.' + '0' * (len(x[0]) - 2):
m = x[0]
else:
m = ''
if len(x) == 2:
ex = x[1].lstrip("0+")
if len(ex) > 0 and ex[0] == '-':
ex = '-' + ex[1:].lstrip('0')
else:
ex = ''
return m, ex
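# Illustrative behaviour (a sketch; note that the default ".8g" spec only
# switches to exponent notation for sufficiently small or large values):
#     >>> split_mantissa_exponent(3.2e-5)
#     ('3.2', '-5')
#     >>> split_mantissa_exponent(100000.0)
#     ('100000', '')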
def decompose_to_known_units(unit, func):
"""
Partially decomposes a unit so it is only composed of units that
are "known" to a given format.
Parameters
----------
unit : `~astropy.units.UnitBase` instance
func : callable
This function will be called to determine if a given unit is
"known". If the unit is not known, this function should raise a
`ValueError`.
Returns
-------
unit : `~astropy.units.UnitBase` instance
A flattened unit.
"""
from astropy.units import core
if isinstance(unit, core.CompositeUnit):
new_unit = core.Unit(unit.scale)
for base, power in zip(unit.bases, unit.powers):
new_unit = new_unit * decompose_to_known_units(base, func) ** power
return new_unit
elif isinstance(unit, core.NamedUnit):
try:
func(unit)
except ValueError:
if isinstance(unit, core.Unit):
return decompose_to_known_units(unit._represents, func)
raise
return unit
else:
raise TypeError("unit argument must be a 'NamedUnit' or 'CompositeUnit', "
f"not {type(unit)}")
def format_power(power):
"""
Converts a value for a power (which may be floating point or a
`fractions.Fraction` object), into a string looking like either
an integer or a fraction, if the power is close to that.
"""
if not hasattr(power, 'denominator'):
power = maybe_simple_fraction(power)
        if getattr(power, 'denominator', None) == 1:
            power = power.numerator
return str(power)
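# Illustrative behaviour (a sketch):
#     >>> from fractions import Fraction
#     >>> format_power(Fraction(1, 2))
#     '1/2'
#     >>> format_power(2.0)
#     '2'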
def _try_decomposed(unit, format_decomposed):
represents = getattr(unit, '_represents', None)
if represents is not None:
try:
represents_string = format_decomposed(represents)
except ValueError:
pass
else:
return represents_string
decomposed = unit.decompose()
if decomposed is not unit:
try:
decompose_string = format_decomposed(decomposed)
except ValueError:
pass
else:
return decompose_string
return None
def did_you_mean_units(s, all_units, deprecated_units, format_decomposed):
"""
A wrapper around `astropy.utils.misc.did_you_mean` that deals with
the display of deprecated units.
Parameters
----------
s : str
The invalid unit string
all_units : dict
A mapping from valid unit names to unit objects.
deprecated_units : sequence
The deprecated unit names
format_decomposed : callable
A function to turn a decomposed version of the unit into a
string. Should return `None` if not possible
Returns
-------
msg : str
A string message with a list of alternatives, or the empty
string.
"""
def fix_deprecated(x):
if x in deprecated_units:
results = [x + ' (deprecated)']
decomposed = _try_decomposed(
all_units[x], format_decomposed)
if decomposed is not None:
results.append(decomposed)
return results
return (x,)
return did_you_mean(s, all_units, fix=fix_deprecated)
def unit_deprecation_warning(s, unit, standard_name, format_decomposed):
"""
Raises a UnitsWarning about a deprecated unit in a given format.
Suggests a decomposed alternative if one is available.
Parameters
----------
s : str
The deprecated unit name.
unit : astropy.units.core.UnitBase
The unit object.
standard_name : str
The name of the format for which the unit is deprecated.
format_decomposed : callable
A function to turn a decomposed version of the unit into a
string. Should return `None` if not possible
"""
from astropy.units.core import UnitsWarning
message = f"The unit '{s}' has been deprecated in the {standard_name} standard."
decomposed = _try_decomposed(unit, format_decomposed)
if decomposed is not None:
message += f" Suggested: {decomposed}."
warnings.warn(message, UnitsWarning)
|
88c24be6b0909c0510ec466c9c352205040c52f08f78f1dcc65331e725d2fe39 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "FITS" unit format.
"""
import copy
import keyword
import operator
import numpy as np
from . import core, generic, utils
class Fits(generic.Generic):
"""
The FITS standard unit format.
This supports the format defined in the Units section of the `FITS
Standard <https://fits.gsfc.nasa.gov/fits_standard.html>`_.
"""
name = 'fits'
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
# Note about deprecated units: before v2.0, several units were treated
# as deprecated (G, barn, erg, Angstrom, angstrom). However, in the
# FITS 3.0 standard, these units are explicitly listed in the allowed
# units, but deprecated in the IAU Style Manual (McNally 1988). So
# after discussion (https://github.com/astropy/astropy/issues/2933),
# these units have been removed from the lists of deprecated units and
# bases.
bases = [
'm', 'g', 's', 'rad', 'sr', 'K', 'A', 'mol', 'cd',
'Hz', 'J', 'W', 'V', 'N', 'Pa', 'C', 'Ohm', 'S',
'F', 'Wb', 'T', 'H', 'lm', 'lx', 'a', 'yr', 'eV',
'pc', 'Jy', 'mag', 'R', 'bit', 'byte', 'G', 'barn'
]
deprecated_bases = []
prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
special_cases = {'dbyte': u.Unit('dbyte', 0.1*u.byte)}
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
elif key in special_cases:
names[key] = special_cases[key]
else:
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
'deg', 'arcmin', 'arcsec', 'mas', 'min', 'h', 'd', 'Ry',
'solMass', 'u', 'solLum', 'solRad', 'AU', 'lyr', 'count',
'ct', 'photon', 'ph', 'pixel', 'pix', 'D', 'Sun', 'chan',
'bin', 'voxel', 'adu', 'beam', 'erg', 'Angstrom', 'angstrom'
]
deprecated_units = []
for unit in simple_units + deprecated_units:
names[unit] = getattr(u, unit)
for unit in deprecated_units:
deprecated_names.add(unit)
return names, deprecated_names, []
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the FITS standard. {}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)))
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'FITS',
cls._to_decomposed_alternative)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
        cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('fits')
cls._validate_unit(name)
return name
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
parts = []
if isinstance(unit, core.CompositeUnit):
base = np.log10(unit.scale)
if base % 1.0 != 0.0:
raise core.UnitScaleError(
"The FITS unit format is not able to represent scales "
"that are not powers of 10. Multiply your data by "
"{:e}.".format(unit.scale))
elif unit.scale != 1.0:
parts.append(f'10**{int(base)}')
pairs = list(zip(unit.bases, unit.powers))
if len(pairs):
pairs.sort(key=operator.itemgetter(1), reverse=True)
parts.append(cls._format_unit_list(pairs))
s = ' '.join(parts)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return f'{cls.to_string(unit)} (with data multiplied by {scale})'
return s
@classmethod
def parse(cls, s, debug=False):
result = super().parse(s, debug)
if hasattr(result, 'function_unit'):
raise ValueError("Function units are not yet supported for "
"FITS units.")
return result
|
282a20a457a2b6d6db6c61a1e968466ef17236e18b819aaf2de16c4d16e98cac | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles a "generic" string format for units
"""
import re
import unicodedata
import warnings
from fractions import Fraction
from astropy.utils import classproperty, parsing
from astropy.utils.misc import did_you_mean
from . import core, utils
from .base import Base
def _to_string(cls, unit):
if isinstance(unit, core.CompositeUnit):
parts = []
if cls._show_scale and unit.scale != 1:
parts.append(f'{unit.scale:g}')
if len(unit.bases):
positives, negatives = utils.get_grouped_by_powers(
unit.bases, unit.powers)
if len(positives):
parts.append(cls._format_unit_list(positives))
elif len(parts) == 0:
parts.append('1')
if len(negatives):
parts.append('/')
unit_list = cls._format_unit_list(negatives)
if len(negatives) == 1:
parts.append(f'{unit_list}')
else:
parts.append(f'({unit_list})')
return ' '.join(parts)
elif isinstance(unit, core.NamedUnit):
return cls._get_unit_name(unit)
class Generic(Base):
"""
A "generic" format.
The syntax of the format is based directly on the FITS standard,
but instead of only supporting the units that FITS knows about, it
supports any unit available in the `astropy.units` namespace.
"""
_show_scale = True
_tokens = (
'COMMA',
'DOUBLE_STAR',
'STAR',
'PERIOD',
'SOLIDUS',
'CARET',
'OPEN_PAREN',
'CLOSE_PAREN',
'FUNCNAME',
'UNIT',
'SIGN',
'UINT',
'UFLOAT'
)
@classproperty(lazy=True)
def _all_units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _units(cls):
return cls._all_units[0]
@classproperty(lazy=True)
def _deprecated_units(cls):
return cls._all_units[1]
@classproperty(lazy=True)
def _functions(cls):
return cls._all_units[2]
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_COMMA = r'\,'
t_STAR = r'\*'
t_PERIOD = r'\.'
t_SOLIDUS = r'/'
t_DOUBLE_STAR = r'\*\*'
t_CARET = r'\^'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?'
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
elif t.value.endswith('.'):
t.type = 'UINT'
t.value = int(t.value[:-1])
else:
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = int(t.value + '1')
return t
# This needs to be a function so we can force it to happen
# before t_UNIT
def t_FUNCNAME(t):
r'((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()'
return t
def t_UNIT(t):
"%|([YZEPTGMkhdcmu\N{MICRO SIGN}npfazy]?'((?!\\d)\\w)+')|((?!\\d)\\w)+"
t.value = cls._get_unit(t)
return t
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
f"Invalid character at col {t.lexpos}")
return parsing.lex(lextab='generic_lextab', package='astropy/units',
reflags=int(re.UNICODE))
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `FITS
standard
<http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
Section 4.3, which is not terribly precise. The exact grammar
        here is based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
This same grammar is used by the `"fits"` and `"vounit"`
formats, the only difference being the set of available unit
strings.
"""
tokens = cls._tokens
def p_main(p):
'''
main : unit
| structured_unit
| structured_subunit
'''
if isinstance(p[1], tuple):
# Unpack possible StructuredUnit inside a tuple, ie.,
# ignore any set of very outer parentheses.
p[0] = p[1][0]
else:
p[0] = p[1]
def p_structured_subunit(p):
'''
structured_subunit : OPEN_PAREN structured_unit CLOSE_PAREN
'''
# We hide a structured unit enclosed by parentheses inside
# a tuple, so that we can easily distinguish units like
# "(au, au/day), yr" from "au, au/day, yr".
p[0] = (p[2],)
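        # For example (illustrative): "(au, au/day), yr" yields a two-field
        # StructuredUnit, ((au, au/day), yr), while "au, au/day, yr" yields
        # a flat three-field StructuredUnit.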
def p_structured_unit(p):
'''
structured_unit : subunit COMMA
| subunit COMMA subunit
'''
from ..structured import StructuredUnit
inputs = (p[1],) if len(p) == 3 else (p[1], p[3])
units = ()
for subunit in inputs:
if isinstance(subunit, tuple):
# Structured unit that should be its own entry in the
# new StructuredUnit (was enclosed in parentheses).
units += subunit
elif isinstance(subunit, StructuredUnit):
# Structured unit whose entries should be
                    # individually added to the new StructuredUnit.
units += subunit.values()
else:
# Regular unit to be added to the StructuredUnit.
units += (subunit,)
p[0] = StructuredUnit(units)
def p_subunit(p):
'''
subunit : unit
| structured_unit
| structured_subunit
'''
p[0] = p[1]
def p_unit(p):
'''
unit : product_of_units
| factor product_of_units
| factor product product_of_units
| division_product_of_units
| factor division_product_of_units
| factor product division_product_of_units
| inverse_unit
| factor inverse_unit
| factor product inverse_unit
| factor
'''
from astropy.units.core import Unit
if len(p) == 2:
p[0] = Unit(p[1])
elif len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = Unit(p[1] * p[3])
def p_division_product_of_units(p):
'''
division_product_of_units : division_product_of_units division product_of_units
| product_of_units
'''
from astropy.units.core import Unit
if len(p) == 4:
p[0] = Unit(p[1] / p[3])
else:
p[0] = p[1]
def p_inverse_unit(p):
'''
inverse_unit : division unit_expression
'''
p[0] = p[2] ** -1
def p_factor(p):
'''
factor : factor_fits
| factor_float
| factor_int
'''
p[0] = p[1]
def p_factor_float(p):
'''
factor_float : signed_float
| signed_float UINT signed_int
| signed_float UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 4:
p[0] = p[1] * p[2] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** float(p[4])
elif len(p) == 2:
p[0] = p[1]
def p_factor_int(p):
'''
factor_int : UINT
| UINT signed_int
| UINT power numeric_power
| UINT UINT signed_int
| UINT UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** float(p[2])
elif len(p) == 4:
if isinstance(p[2], int):
p[0] = p[1] * p[2] ** float(p[3])
else:
p[0] = p[1] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** p[4]
def p_factor_fits(p):
'''
factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
| UINT power OPEN_PAREN UINT CLOSE_PAREN
| UINT power signed_int
| UINT power UINT
| UINT SIGN UINT
| UINT OPEN_PAREN signed_int CLOSE_PAREN
'''
if p[1] != 10:
if cls.name == 'fits':
raise ValueError("Base must be 10")
else:
return
if len(p) == 4:
if p[2] in ('**', '^'):
p[0] = 10 ** p[3]
else:
p[0] = 10 ** (p[2] * p[3])
elif len(p) == 5:
p[0] = 10 ** p[3]
elif len(p) == 6:
p[0] = 10 ** p[4]
def p_product_of_units(p):
'''
product_of_units : unit_expression product product_of_units
| unit_expression product_of_units
| unit_expression
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1] * p[3]
def p_unit_expression(p):
'''
unit_expression : function
| unit_with_power
| OPEN_PAREN product_of_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_unit_with_power(p):
'''
unit_with_power : UNIT power numeric_power
| UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** p[2]
else:
p[0] = p[1] ** p[3]
def p_numeric_power(p):
'''
numeric_power : sign UINT
| OPEN_PAREN paren_expr CLOSE_PAREN
'''
if len(p) == 3:
p[0] = p[1] * p[2]
elif len(p) == 4:
p[0] = p[2]
def p_paren_expr(p):
'''
paren_expr : sign UINT
| signed_float
| frac
'''
if len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_frac(p):
'''
frac : sign UINT division sign UINT
'''
p[0] = Fraction(p[1] * p[2], p[4] * p[5])
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1
def p_product(p):
'''
product : STAR
| PERIOD
'''
pass
def p_division(p):
'''
division : SOLIDUS
'''
pass
def p_power(p):
'''
power : DOUBLE_STAR
| CARET
'''
p[0] = p[1]
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_function_name(p):
'''
function_name : FUNCNAME
'''
p[0] = p[1]
def p_function(p):
'''
function : function_name OPEN_PAREN main CLOSE_PAREN
'''
if p[1] == 'sqrt':
p[0] = p[3] ** 0.5
return
elif p[1] in ('mag', 'dB', 'dex'):
function_unit = cls._parse_unit(p[1])
# In Generic, this is callable, but that does not have to
# be the case in subclasses (e.g., in VOUnit it is not).
if callable(function_unit):
p[0] = function_unit(p[3])
return
raise ValueError(f"'{p[1]}' is not a recognized function")
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule='generic_parsetab', package='astropy/units')
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
registry = core.get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(
f"At col {t.lexpos}, {str(e)}")
@classmethod
def _parse_unit(cls, s, detailed_exception=True):
registry = core.get_current_unit_registry().registry
if s in cls._unit_symbols:
s = cls._unit_symbols[s]
elif not s.isascii():
if s[0] == '\N{MICRO SIGN}':
s = 'u' + s[1:]
if s[-1] in cls._prefixable_unit_symbols:
s = s[:-1] + cls._prefixable_unit_symbols[s[-1]]
elif len(s) > 1 and s[-1] in cls._unit_suffix_symbols:
s = s[:-1] + cls._unit_suffix_symbols[s[-1]]
elif s.endswith('R\N{INFINITY}'):
s = s[:-2] + 'Ry'
if s in registry:
return registry[s]
if detailed_exception:
raise ValueError(
f'{s} is not a valid unit. {did_you_mean(s, registry)}')
else:
raise ValueError()
_unit_symbols = {
'%': 'percent',
'\N{PRIME}': 'arcmin',
'\N{DOUBLE PRIME}': 'arcsec',
'\N{MODIFIER LETTER SMALL H}': 'hourangle',
'e\N{SUPERSCRIPT MINUS}': 'electron',
}
_prefixable_unit_symbols = {
'\N{GREEK CAPITAL LETTER OMEGA}': 'Ohm',
'\N{LATIN CAPITAL LETTER A WITH RING ABOVE}': 'Angstrom',
'\N{SCRIPT SMALL L}': 'l',
}
_unit_suffix_symbols = {
'\N{CIRCLED DOT OPERATOR}': 'sun',
'\N{SUN}': 'sun',
'\N{CIRCLED PLUS}': 'earth',
'\N{EARTH}': 'earth',
'\N{JUPITER}': 'jupiter',
'\N{LATIN SUBSCRIPT SMALL LETTER E}': '_e',
'\N{LATIN SUBSCRIPT SMALL LETTER P}': '_p',
}
_translations = str.maketrans({
'\N{GREEK SMALL LETTER MU}': '\N{MICRO SIGN}',
'\N{MINUS SIGN}': '-',
})
"""Character translations that should be applied before parsing a string.
Note that this does explicitly *not* generally translate MICRO SIGN to u,
since then a string like 'µ' would be interpreted as unit mass.
"""
_superscripts = (
'\N{SUPERSCRIPT MINUS}'
'\N{SUPERSCRIPT PLUS SIGN}'
'\N{SUPERSCRIPT ZERO}'
'\N{SUPERSCRIPT ONE}'
'\N{SUPERSCRIPT TWO}'
'\N{SUPERSCRIPT THREE}'
'\N{SUPERSCRIPT FOUR}'
'\N{SUPERSCRIPT FIVE}'
'\N{SUPERSCRIPT SIX}'
'\N{SUPERSCRIPT SEVEN}'
'\N{SUPERSCRIPT EIGHT}'
'\N{SUPERSCRIPT NINE}'
)
_superscript_translations = str.maketrans(_superscripts, '-+0123456789')
_regex_superscript = re.compile(f'[{_superscripts}]?[{_superscripts[2:]}]+')
_regex_deg = re.compile('°([CF])?')
@classmethod
def _convert_superscript(cls, m):
return f'({m.group().translate(cls._superscript_translations)})'
@classmethod
def _convert_deg(cls, m):
if len(m.string) == 1:
return 'deg'
return m.string.replace('°', 'deg_')
@classmethod
def parse(cls, s, debug=False):
if not isinstance(s, str):
s = s.decode('ascii')
elif not s.isascii():
# common normalization of unicode strings to avoid
# having to deal with multiple representations of
# the same character. This normalizes to "composed" form
# and will e.g. convert OHM SIGN to GREEK CAPITAL LETTER OMEGA
s = unicodedata.normalize('NFC', s)
# Translate some basic unicode items that we'd like to support on
# input but are not standard.
s = s.translate(cls._translations)
# TODO: might the below be better done in the parser/lexer?
# Translate superscripts to parenthesized numbers; this ensures
# that mixes of superscripts and regular numbers fail.
s = cls._regex_superscript.sub(cls._convert_superscript, s)
# Translate possible degrees.
s = cls._regex_deg.sub(cls._convert_deg, s)
result = cls._do_parse(s, debug=debug)
# Check for excess solidi, but exclude fractional exponents (accepted)
n_slashes = s.count('/')
if n_slashes > 1 and (n_slashes - len(re.findall(r'\(\d+/\d+\)', s))) > 1:
warnings.warn(
"'{}' contains multiple slashes, which is "
"discouraged by the FITS standard".format(s),
core.UnitsWarning)
return result
@classmethod
def _do_parse(cls, s, debug=False):
try:
# This is a short circuit for the case where the string
# is just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError as e:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise
else:
raise ValueError(f"Syntax error parsing unit '{s}'")
@classmethod
def _get_unit_name(cls, unit):
return unit.get_format_name('generic')
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power or '.' in power:
out.append(f'{cls._get_unit_name(base)}({power})')
else:
out.append(f'{cls._get_unit_name(base)}{power}')
return ' '.join(out)
@classmethod
def to_string(cls, unit):
return _to_string(cls, unit)
class Unscaled(Generic):
"""
A format that doesn't display the scale part of the unit, other
than that, it is identical to the `Generic` format.
This is used in some error messages where the scale is irrelevant.
"""
_show_scale = False
|
360a1e92d08434840efe60d5fb1cdc62bc2e12f00c8c2557676421f60bce7781 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from astropy.units.core import (
UnitConversionError, UnitsError, UnitTypeError, dimensionless_unscaled,
get_current_unit_registry, unit_scale_converter)
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
converter = from_unit._get_converter(to_unit)
return None if converter is unit_scale_converter else converter
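# Illustrative behaviour (a sketch):
#     >>> from astropy import units as u
#     >>> get_converter(u.km, u.m)(1.0)
#     1000.0
#     >>> get_converter(u.m, u.m) is None
#     True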
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
        # identity check: ensures the common case of identical units is fast
        # ("==" is slow, so avoid it).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
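# For example (illustrative), a squaring helper needs no input conversion and
# squares the unit:
#     >>> import numpy as np
#     >>> from astropy import units as u
#     >>> helper_square(np.square, u.m)
#     ([None], Unit("m2"))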
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit ** one_half if unit is not None
else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** one_third if unit is not None
else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from astropy.units.si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_degree(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from astropy.units.si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
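# As an illustrative sketch of this contract (not executed here):
#     from astropy.units.si import km, m
#     converters, unit = get_converters_and_unit(np.add, km, m)
#     # converters == [None, <m -> km converter>]; converters[1](1.) == 0.001
#     # unit == km, the unit of the first argument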
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few microseconds; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from astropy.units.si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
def helper_clip(f, unit1, unit2, unit3):
# Treat the array being clipped as primary.
converters = [None]
if unit1 is None:
result_unit = dimensionless_unscaled
try:
converters += [(None if unit is None else
get_converter(unit, dimensionless_unscaled))
for unit in (unit2, unit3)]
except UnitsError:
raise UnitConversionError(
"Can only apply '{}' function to quantities with "
"compatible dimensions".format(f.__name__))
else:
result_unit = unit1
for unit in unit2, unit3:
try:
converter = get_converter(_d(unit), result_unit)
except UnitsError:
if unit is None:
# special case: OK if unitless number is zero, inf, nan
converters.append(False)
else:
raise UnitConversionError(
"Can only apply '{}' function to quantities with "
"compatible dimensions".format(f.__name__))
else:
converters.append(converter)
return converters, result_unit
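# Illustrative sketch for helper_clip (not executed here):
#     from astropy.units.si import km, m
#     helper_clip(np.core.umath.clip, km, m, None)
#     # -> ([None, <m -> km converter>, False], km); the unitless bound gets
#     # the ``False`` sentinel: acceptable only if it is all zero/inf/nan.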
# list of ufuncs:
# https://numpy.org/doc/stable/reference/ufuncs.html#available-ufuncs
UNSUPPORTED_UFUNCS |= {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not, np.isnat, np.gcd, np.lcm}
# SINGLE ARGUMENT UFUNCS
# ufuncs that do not care about the unit and do not return a Quantity
# (but rather a boolean, or -1, 0, or +1 for np.sign).
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc,
np.positive)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
# Default numpy does not ship an "erf" ufunc, but some versions hacked by
# intel do. This is bad, since it means code written for that numpy will
# not run on non-hacked numpy. But still, we might as well support it.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
if isinstance(getattr(np, 'matmul', None), np.ufunc):
UFUNC_HELPERS[np.matmul] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
# Check for clip ufunc; note that np.clip is a wrapper function, not the ufunc.
if isinstance(getattr(np.core.umath, 'clip', None), np.ufunc):
UFUNC_HELPERS[np.core.umath.clip] = helper_clip
del ufunc
|
66146588e7118617a42fe643b73dbdcb3b8ad2eb354ae1adbd4779f24ac2d7d9 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Converters for Quantity."""
import threading
import numpy as np
from astropy.units.core import (
UnitConversionError, UnitsError, UnitTypeError, dimensionless_unscaled)
__all__ = ['can_have_arbitrary_unit', 'converters_and_unit',
'check_output', 'UFUNC_HELPERS', 'UNSUPPORTED_UFUNCS']
class UfuncHelpers(dict):
"""Registry of unit conversion functions to help ufunc evaluation.
Based on dict for quick access, but with a missing method to load
helpers for additional modules such as scipy.special and erfa.
Such modules should be registered using ``register_module``.
"""
def __init__(self, *args, **kwargs):
self.modules = {}
self.UNSUPPORTED = set() # Upper-case for backwards compatibility
self._lock = threading.RLock()
super().__init__(*args, **kwargs)
def register_module(self, module, names, importer):
"""Register (but do not import) a set of ufunc helpers.
Parameters
----------
module : str
Name of the module with the ufuncs (e.g., 'scipy.special').
names : iterable of str
Names of the module ufuncs for which helpers are available.
importer : callable
Function that imports the ufuncs and returns a dict of helpers
keyed by those ufuncs. If the value is `None`, the ufunc is
explicitly *not* supported.
"""
with self._lock:
self.modules[module] = {'names': names,
'importer': importer}
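    # Illustrative sketch of a registration (this mirrors the call made at
    # the bottom of the erfa helper module; the importer, when called,
    # returns a dict of {ufunc: helper}):
    #     UFUNC_HELPERS.register_module('erfa.ufunc', erfa_ufuncs,
    #                                   get_erfa_helpers)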
def import_module(self, module):
"""Import the helpers from the given module using its helper function.
Parameters
----------
module : str
Name of the module. Has to have been registered beforehand.
"""
with self._lock:
module_info = self.modules.pop(module)
self.update(module_info['importer']())
def __missing__(self, ufunc):
"""Called if a ufunc is not found.
Check if the ufunc is in any of the available modules, and, if so,
import the helpers for that module.
"""
with self._lock:
# Check if it was loaded while we waited for the lock
if ufunc in self:
return self[ufunc]
if ufunc in self.UNSUPPORTED:
raise TypeError(f"Cannot use ufunc '{ufunc.__name__}' with quantities")
for module, module_info in list(self.modules.items()):
if ufunc.__name__ in module_info['names']:
# A ufunc with the same name is supported by this module.
# Of course, this doesn't necessarily mean it is the
                # right module. So, we try to let the importer do its work.
# If it fails (e.g., for `scipy.special`), then that's
# fine, just raise the TypeError. If it succeeds, but
# the ufunc is not found, that is also fine: we will
# enter __missing__ again and either find another
# module or get the TypeError there.
try:
self.import_module(module)
except ImportError: # pragma: no cover
pass
else:
return self[ufunc]
raise TypeError("unknown ufunc {}. If you believe this ufunc "
"should be supported, please raise an issue on "
"https://github.com/astropy/astropy"
.format(ufunc.__name__))
def __setitem__(self, key, value):
# Implementation note: in principle, we could just let `None`
# mean that something is not implemented, but this means an
# extra if clause for the output, slowing down the common
# path where a ufunc is supported.
with self._lock:
if value is None:
self.UNSUPPORTED |= {key}
self.pop(key, None)
else:
super().__setitem__(key, value)
self.UNSUPPORTED -= {key}
UFUNC_HELPERS = UfuncHelpers()
UNSUPPORTED_UFUNCS = UFUNC_HELPERS.UNSUPPORTED
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
bool
`True` if each member is either zero or not finite, `False` otherwise
"""
return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))
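# Illustrative examples (sketch, not executed here):
#     can_have_arbitrary_unit(0.)        # -> True
#     can_have_arbitrary_unit(np.inf)    # -> True
#     can_have_arbitrary_unit([1., 0.])  # -> False; 1. would change with unit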
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : `~astropy.units.Quantity` or ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we support this ufunc, by getting the helper function
# (defined in helpers) which returns a list of function(s) that convert the
# input(s) to the unit required for the ufunc, as well as the unit the
# result will have (a tuple of units if there are multiple outputs).
ufunc_helper = UFUNC_HELPERS[function]
if method == '__call__' or (method == 'outer' and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, 'unit', None) for arg in args]
# Determine possible conversion functions, and the result unit.
converters, result_unit = ufunc_helper(function, *units)
if any(converter is False for converter in converters):
# for multi-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
try:
# Don't fold this loop in the test above: this rare case
# should not make the common case slower.
for i, converter in enumerate(converters):
if converter is not False:
continue
if can_have_arbitrary_unit(args[i]):
converters[i] = None
else:
raise UnitConversionError(
"Can only apply '{}' function to "
"dimensionless quantities when other "
"argument is not a quantity (unless the "
"latter is all zero/infinity/nan)"
.format(function.__name__))
except TypeError:
                # can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError("Unsupported operand type(s) for ufunc {}: "
"'{}'".format(function.__name__,
','.join([arg.__class__.__name__
for arg in args])))
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(
dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
nin = function.nin
unit = getattr(args[0], 'unit', None)
if method == 'at' and nin <= 2:
if nin == 1:
units = [unit]
else:
units = [unit, getattr(args[2], 'unit', None)]
converters, result_unit = ufunc_helper(function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif method in {'reduce', 'accumulate', 'reduceat'} and nin == 2:
converters, result_unit = ufunc_helper(function, unit, unit)
converters = converters[:1]
if method == 'reduceat':
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in {'reduce', 'accumulate',
'reduceat', 'outer'} and nin != 2:
raise ValueError(f"{method} only supported for binary functions")
raise TypeError("Unexpected ufunc method {}. If this should "
"work, please raise an issue on"
"https://github.com/astropy/astropy"
.format(method))
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as the result is not a "
"Quantity.".format(function.__name__, method))
if (converters[0] is not None or
(unit is not None and unit is not result_unit and
(not result_unit.is_equivalent(unit) or
result_unit.to(unit) != 1.))):
# NOTE: this cannot be the more logical UnitTypeError, since
        # then things like np.cumprod will no longer fail (they check
# for TypeError).
raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as it would change the unit."
.format(function.__name__, method))
return converters, result_unit
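# Illustrative sketch of a typical call (not executed here; ``u`` standing
# for astropy.units):
#     converters, unit = converters_and_unit(np.add, '__call__',
#                                            1. * u.km, 1. * u.m)
#     # converters == [None, <m -> km converter>]; unit == u.km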
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : ndarray view or tuple thereof
The view(s) is of ``output``.
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit))
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, '__quantity_subclass__'):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError("Cannot store non-quantity output{} in {} "
"instance".format(
(f" from {function.__name__} function"
if function is not None else ""),
type(output)))
q_cls, subok = output.__quantity_subclass__(unit)
if not (subok or q_cls is type(output)):
raise UnitTypeError(
"Cannot store output with unit '{}'{} "
"in {} instance. Use {} instance instead."
.format(unit, (f" from {function.__name__} function"
if function is not None else ""),
type(output), q_cls))
# check we can handle the dtype (e.g., that we are not int
# when float is required). Note that we only do this for Quantity
# output; for array output, we defer to numpy's default handling.
# Also, any structured dtype are ignored (likely erfa ufuncs).
# TODO: make more logical; is this necessary at all?
if inputs and not output.dtype.names:
result_type = np.result_type(*inputs)
if not (result_type.names
or np.can_cast(result_type, output.dtype,
casting='same_kind')):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={}".format(output.dtype))
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
return output.view(np.ndarray)
else:
# output is not a Quantity, so cannot obtain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError("Cannot store quantity with dimension "
"{}in a non-Quantity instance."
.format("" if function is None else
"resulting from {} function "
.format(function.__name__)))
return output
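# Illustrative sketch (not executed here; ``u`` standing for astropy.units):
# for a plain ndarray ``out``, check_output(out, None, inputs) returns
# ``out`` unchanged, while check_output(out, u.m, inputs) raises
# UnitTypeError, since a plain array cannot hold a dimensional quantity.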
|
679c96b67138e4652c978d0ee920a37b28c2131f03140a81e749a0ed589c654e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from .converters import *
# isort: split
# By importing helpers, all the unit conversion functions needed for
# numpy ufuncs and functions are defined.
# For scipy.special and erfa, importing the helper modules ensures
# the definitions are added as modules to UFUNC_HELPERS, to be loaded
# on demand.
from . import erfa, function_helpers, helpers, scipy_special
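# Illustrative sketch of the lazy loading this enables (not executed here):
# looking up a registered but not-yet-imported ufunc in UFUNC_HELPERS
# triggers UfuncHelpers.__missing__, which imports that helper module on
# demand, e.g.:
#     import erfa
#     UFUNC_HELPERS[erfa.ufunc.s2c]  # imports the erfa helpers, then resolves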
|
ae7ce60bc97f7def23090c5afa1c0dda206b43011bf76c9e6ec298a8bd1cf408 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the ERFA ufuncs."""
# Tests for these are in coordinates, not in units.
from erfa import dt_eraASTROM, dt_eraLDBODY, dt_pv
from erfa import ufunc as erfa_ufunc
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from astropy.units.structured import StructuredUnit
from . import UFUNC_HELPERS
from .helpers import (
_d, get_converter, helper_invariant, helper_multiplication, helper_twoarg_invariant)
erfa_ufuncs = ('s2c', 's2p', 'c2s', 'p2s', 'pm', 'pdp', 'pxp', 'rxp',
'cpv', 'p2pv', 'pv2p', 'pv2s', 'pvdpv', 'pvm', 'pvmpv', 'pvppv',
'pvstar', 'pvtob', 'pvu', 'pvup', 'pvxpv', 'rxpv', 's2pv', 's2xpv',
'starpv', 'sxpv', 'trxpv', 'gd2gc', 'gc2gd', 'ldn', 'aper',
'apio', 'atciq', 'atciqn', 'atciqz', 'aticq', 'atioq', 'atoiq')
def has_matching_structure(unit, dtype):
dtype_fields = dtype.fields
if dtype_fields:
return (isinstance(unit, StructuredUnit)
and len(unit) == len(dtype_fields)
and all(has_matching_structure(u, df_v[0])
for (u, df_v) in zip(unit.values(), dtype_fields.values())))
else:
return not isinstance(unit, StructuredUnit)
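# Illustrative sketch (not executed here): dt_pv has 'p' and 'v' fields, so,
# with AU and day standing for the corresponding astropy units,
#     has_matching_structure(StructuredUnit((AU, AU / day)), dt_pv)  # -> True
#     has_matching_structure(AU, dt_pv)                              # -> False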
def check_structured_unit(unit, dtype):
if not has_matching_structure(unit, dtype):
msg = {dt_pv: 'pv',
dt_eraLDBODY: 'ldbody',
dt_eraASTROM: 'astrom'}.get(dtype, 'function')
raise UnitTypeError(f'{msg} input needs unit matching dtype={dtype}.')
def helper_s2c(f, unit1, unit2):
from astropy.units.si import radian
try:
return [get_converter(unit1, radian),
get_converter(unit2, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_s2p(f, unit1, unit2, unit3):
from astropy.units.si import radian
try:
return [get_converter(unit1, radian),
get_converter(unit2, radian), None], unit3
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_c2s(f, unit1):
from astropy.units.si import radian
return [None], (radian, radian)
def helper_p2s(f, unit1):
from astropy.units.si import radian
return [None], (radian, radian, unit1)
def helper_gc2gd(f, nounit, unit1):
from astropy.units.si import m, radian
if nounit is not None:
raise UnitTypeError("ellipsoid cannot be a quantity.")
try:
return [None, get_converter(unit1, m)], (radian, radian, m, None)
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with length units"
.format(f.__name__))
def helper_gd2gc(f, nounit, unit1, unit2, unit3):
from astropy.units.si import m, radian
if nounit is not None:
raise UnitTypeError("ellipsoid cannot be a quantity.")
try:
return [None,
get_converter(unit1, radian),
get_converter(unit2, radian),
get_converter(unit3, m)], (m, None)
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to lon, lat "
"with angle and height with length units"
.format(f.__name__))
def helper_p2pv(f, unit1):
from astropy.units.si import s
if isinstance(unit1, StructuredUnit):
raise UnitTypeError("p vector unit cannot be a structured unit.")
return [None], StructuredUnit((unit1, unit1 / s))
def helper_pv2p(f, unit1):
check_structured_unit(unit1, dt_pv)
return [None], unit1[0]
def helper_pv2s(f, unit_pv):
from astropy.units.si import radian
check_structured_unit(unit_pv, dt_pv)
ang_unit = radian * unit_pv[1] / unit_pv[0]
return [None], (radian, radian, unit_pv[0], ang_unit, ang_unit, unit_pv[1])
def helper_s2pv(f, unit_theta, unit_phi, unit_r, unit_td, unit_pd, unit_rd):
from astropy.units.si import radian
time_unit = unit_r / unit_rd
return [get_converter(unit_theta, radian),
get_converter(unit_phi, radian),
None,
get_converter(unit_td, radian / time_unit),
get_converter(unit_pd, radian / time_unit),
None], StructuredUnit((unit_r, unit_rd))
def helper_pv_multiplication(f, unit1, unit2):
check_structured_unit(unit1, dt_pv)
check_structured_unit(unit2, dt_pv)
result_unit = StructuredUnit((unit1[0] * unit2[0], unit1[1] * unit2[0]))
converter = get_converter(unit2, StructuredUnit(
(unit2[0], unit1[1] * unit2[0] / unit1[0])))
return [None, converter], result_unit
def helper_pvm(f, unit1):
check_structured_unit(unit1, dt_pv)
return [None], (unit1[0], unit1[1])
def helper_pvstar(f, unit1):
from astropy.units.astrophys import AU
from astropy.units.si import arcsec, day, km, radian, s, year
return [get_converter(unit1, StructuredUnit((AU, AU/day)))], (
radian, radian, radian / year, radian / year, arcsec, km / s, None)
def helper_starpv(f, unit_ra, unit_dec, unit_pmr, unit_pmd,
unit_px, unit_rv):
from astropy.units.astrophys import AU
from astropy.units.si import arcsec, day, km, radian, s, year
return [get_converter(unit_ra, radian),
get_converter(unit_dec, radian),
get_converter(unit_pmr, radian/year),
get_converter(unit_pmd, radian/year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km/s)], (StructuredUnit((AU, AU/day)), None)
def helper_pvtob(f, unit_elong, unit_phi, unit_hm,
unit_xp, unit_yp, unit_sp, unit_theta):
from astropy.units.si import m, radian, s
return [get_converter(unit_elong, radian),
get_converter(unit_phi, radian),
get_converter(unit_hm, m),
get_converter(unit_xp, radian),
get_converter(unit_yp, radian),
get_converter(unit_sp, radian),
get_converter(unit_theta, radian)], StructuredUnit((m, m/s))
def helper_pvu(f, unit_t, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [get_converter(unit_t, unit_pv[0]/unit_pv[1]), None], unit_pv
def helper_pvup(f, unit_t, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [get_converter(unit_t, unit_pv[0]/unit_pv[1]), None], unit_pv[0]
def helper_s2xpv(f, unit1, unit2, unit_pv):
check_structured_unit(unit_pv, dt_pv)
return [None, None, None], StructuredUnit((_d(unit1) * unit_pv[0],
_d(unit2) * unit_pv[1]))
def ldbody_unit():
from astropy.units.astrophys import AU, Msun
from astropy.units.si import day, radian
return StructuredUnit((Msun, radian, (AU, AU/day)),
erfa_ufunc.dt_eraLDBODY)
def astrom_unit():
from astropy.units.astrophys import AU
from astropy.units.si import rad, year
one = rel2c = dimensionless_unscaled
return StructuredUnit((year, AU, one, AU, rel2c, one, one, rad, rad, rad, rad,
one, one, rel2c, rad, rad, rad),
erfa_ufunc.dt_eraASTROM)
def helper_ldn(f, unit_b, unit_ob, unit_sc):
from astropy.units.astrophys import AU
return [get_converter(unit_b, ldbody_unit()),
get_converter(unit_ob, AU),
get_converter(_d(unit_sc), dimensionless_unscaled)], dimensionless_unscaled
def helper_aper(f, unit_theta, unit_astrom):
check_structured_unit(unit_astrom, dt_eraASTROM)
unit_along = unit_astrom[7] # along
if unit_astrom[14] is unit_along: # eral
result_unit = unit_astrom
else:
result_units = tuple((unit_along if i == 14 else v)
for i, v in enumerate(unit_astrom.values()))
result_unit = unit_astrom.__class__(result_units, names=unit_astrom)
return [get_converter(unit_theta, unit_along), None], result_unit
def helper_apio(f, unit_sp, unit_theta, unit_elong, unit_phi, unit_hm,
unit_xp, unit_yp, unit_refa, unit_refb):
from astropy.units.si import m, radian
return [get_converter(unit_sp, radian),
get_converter(unit_theta, radian),
get_converter(unit_elong, radian),
get_converter(unit_phi, radian),
get_converter(unit_hm, m),
get_converter(unit_xp, radian),
            get_converter(unit_yp, radian),
            get_converter(unit_refa, radian),
            get_converter(unit_refb, radian)], astrom_unit()
def helper_atciq(f, unit_rc, unit_dc, unit_pr, unit_pd, unit_px, unit_rv, unit_astrom):
from astropy.units.si import arcsec, km, radian, s, year
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_pr, radian / year),
get_converter(unit_pd, radian / year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km / s),
get_converter(unit_astrom, astrom_unit())], (radian, radian)
def helper_atciqn(f, unit_rc, unit_dc, unit_pr, unit_pd, unit_px, unit_rv, unit_astrom,
unit_b):
from astropy.units.si import arcsec, km, radian, s, year
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_pr, radian / year),
get_converter(unit_pd, radian / year),
get_converter(unit_px, arcsec),
get_converter(unit_rv, km / s),
get_converter(unit_astrom, astrom_unit()),
get_converter(unit_b, ldbody_unit())], (radian, radian)
def helper_atciqz_aticq(f, unit_rc, unit_dc, unit_astrom):
from astropy.units.si import radian
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit())], (radian, radian)
def helper_aticqn(f, unit_rc, unit_dc, unit_astrom, unit_b):
from astropy.units.si import radian
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit()),
get_converter(unit_b, ldbody_unit())], (radian, radian)
def helper_atioq(f, unit_rc, unit_dc, unit_astrom):
from astropy.units.si import radian
return [get_converter(unit_rc, radian),
get_converter(unit_dc, radian),
get_converter(unit_astrom, astrom_unit())], (radian,)*5
def helper_atoiq(f, unit_type, unit_ri, unit_di, unit_astrom):
from astropy.units.si import radian
if unit_type is not None:
raise UnitTypeError("argument 'type' should not have a unit")
return [None,
get_converter(unit_ri, radian),
get_converter(unit_di, radian),
get_converter(unit_astrom, astrom_unit())], (radian, radian)
def get_erfa_helpers():
ERFA_HELPERS = {}
ERFA_HELPERS[erfa_ufunc.s2c] = helper_s2c
ERFA_HELPERS[erfa_ufunc.s2p] = helper_s2p
ERFA_HELPERS[erfa_ufunc.c2s] = helper_c2s
ERFA_HELPERS[erfa_ufunc.p2s] = helper_p2s
ERFA_HELPERS[erfa_ufunc.pm] = helper_invariant
ERFA_HELPERS[erfa_ufunc.cpv] = helper_invariant
ERFA_HELPERS[erfa_ufunc.p2pv] = helper_p2pv
ERFA_HELPERS[erfa_ufunc.pv2p] = helper_pv2p
ERFA_HELPERS[erfa_ufunc.pv2s] = helper_pv2s
ERFA_HELPERS[erfa_ufunc.pvdpv] = helper_pv_multiplication
ERFA_HELPERS[erfa_ufunc.pvxpv] = helper_pv_multiplication
ERFA_HELPERS[erfa_ufunc.pvm] = helper_pvm
ERFA_HELPERS[erfa_ufunc.pvmpv] = helper_twoarg_invariant
ERFA_HELPERS[erfa_ufunc.pvppv] = helper_twoarg_invariant
ERFA_HELPERS[erfa_ufunc.pvstar] = helper_pvstar
ERFA_HELPERS[erfa_ufunc.pvtob] = helper_pvtob
ERFA_HELPERS[erfa_ufunc.pvu] = helper_pvu
ERFA_HELPERS[erfa_ufunc.pvup] = helper_pvup
ERFA_HELPERS[erfa_ufunc.pdp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.pxp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.rxp] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.rxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.s2pv] = helper_s2pv
ERFA_HELPERS[erfa_ufunc.s2xpv] = helper_s2xpv
ERFA_HELPERS[erfa_ufunc.starpv] = helper_starpv
ERFA_HELPERS[erfa_ufunc.sxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.trxpv] = helper_multiplication
ERFA_HELPERS[erfa_ufunc.gc2gd] = helper_gc2gd
ERFA_HELPERS[erfa_ufunc.gd2gc] = helper_gd2gc
ERFA_HELPERS[erfa_ufunc.ldn] = helper_ldn
ERFA_HELPERS[erfa_ufunc.aper] = helper_aper
ERFA_HELPERS[erfa_ufunc.apio] = helper_apio
ERFA_HELPERS[erfa_ufunc.atciq] = helper_atciq
ERFA_HELPERS[erfa_ufunc.atciqn] = helper_atciqn
ERFA_HELPERS[erfa_ufunc.atciqz] = helper_atciqz_aticq
ERFA_HELPERS[erfa_ufunc.aticq] = helper_atciqz_aticq
ERFA_HELPERS[erfa_ufunc.aticqn] = helper_aticqn
ERFA_HELPERS[erfa_ufunc.atioq] = helper_atioq
ERFA_HELPERS[erfa_ufunc.atoiq] = helper_atoiq
return ERFA_HELPERS
UFUNC_HELPERS.register_module('erfa.ufunc', erfa_ufuncs,
get_erfa_helpers)
|
89dce95350995d001a5c93223fd4bc547b3680c1220afdde7b641d5c121a2702 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license. See LICENSE.rst except
# for parts explicitly labelled as being (largely) copies of numpy
# implementations; for those, see licenses/NUMPY_LICENSE.rst.
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
"""
import functools
import operator
import numpy as np
from numpy.lib import recfunctions as rfn
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_23
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides,
'ENABLE_ARRAY_FUNCTION', True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diagonal, np.diagflat,
np.empty_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.median, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis, np.take_along_axis, np.put_along_axis,
np.linalg.cond, np.linalg.multi_dot}
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
# Nonsensical for quantities.
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday, np.all, np.any, np.sometrue, np.alltrue}
# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}
# TODO! support whichever of these functions it makes sense to support
TBD_FUNCTIONS = {
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.apply_along_fields, rfn.assign_fields_by_name, rfn.merge_arrays,
rfn.find_duplicates, rfn.recursive_fill_fields, rfn.require_fields,
rfn.repack_fields, rfn.stack_arrays
}
UNSUPPORTED_FUNCTIONS |= TBD_FUNCTIONS
# The following are not just unsupported, but so unlikely to be thought
# to be supported that we ignore them in testing. (Kept in a separate
# variable so that we can check consistency in the test routine -
# test_quantity_non_ufuncs.py)
IGNORED_FUNCTIONS = {
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
# functions taking record arrays (which are deprecated)
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
}
if NUMPY_LT_1_20:
# financial
IGNORED_FUNCTIONS |= {np.fv, np.ipmt, np.irr, np.mirr, np.nper,
np.npv, np.pmt, np.ppmt, np.pv, np.rate}
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None, module=np):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
"""
if f is not None:
if helps is None:
helps = getattr(module, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None or module is not np:
return functools.partial(self.__call__, helps=helps, module=module)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
@function_helper(helps={
np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
np.fft.hfft, np.fft.ihfft,
np.linalg.eigvals, np.linalg.eigvalsh})
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default since if one creates an empty
# array with a unit, one cannot just fill it with unity. Indeed, in this
# respect, it is a bit of an odd function for Quantity. On the other hand, it
# matches the idea that a unit is the same as the quantity with that unit and
# value of 1. Also, it used to work without __array_function__.
# zeros_like does work by default for regular quantities, because numpy first
# creates an empty array with the unit and then fills it with 0 (which can have
# any unit), but for structured dtype this fails (0 cannot have an arbitrary
# structured unit), so we include it here too.
@function_helper(helps={np.ones_like, np.zeros_like})
def like_helper(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop('subok', True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError("Can only apply 'sinc' function to "
"quantities with angle units")
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(p.to_value(radian),
discont.to_value(radian), axis=axis)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get('subok', True) else None
return (a.view(np.ndarray),
a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask,
a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask,
values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask,
arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask,
vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return ((dst.view(np.ndarray), dst._to_own_unit(src)) + args,
kwargs, None, None)
elif isinstance(src, Quantity):
return ((dst, src.to_value(dimensionless_unscaled)) + args,
kwargs, None, None)
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return ((x.view(np.ndarray),),
dict(copy=True, nan=nan, posinf=posinf, neginf=neginf),
x.unit, None)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
return tuple(Quantity(a, copy=False, subok=True)
for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
"""Convert to arrays in units of the first argument that has a unit.
    If unit_from_first, take the unit of the first argument regardless of
    whether it actually defined a unit (e.g., dimensionless for arrays).
"""
# Turn first argument into a quantity.
q = _as_quantity(args[0])
if len(args) == 1:
return (q.value,), q.unit
# If we care about the unit being explicit, then check whether this
# argument actually had a unit, or was likely inferred.
if not unit_from_first and (q.unit is q._default_unit
and not hasattr(args[0], 'unit')):
        # Here, the argument could still be things like
        # [10*u.one, 11.*u.one], i.e., properly dimensionless. So, we only
        # override with anything that has a unit not equivalent to
        # dimensionless (fine to let other dimensionless units pass, even
        # if explicitly given).
for arg in args[1:]:
trial = _as_quantity(arg)
if not trial.unit.is_equivalent(q.unit):
# Use any explicit unit not equivalent to dimensionless.
q = trial
break
# We use the private _to_own_unit method here instead of just
# converting everything to quantity and then do .to_value(qs0.unit)
# as we want to allow arbitrary unit for 0, inf, and nan.
try:
arrays = tuple((q._to_own_unit(arg)) for arg in args)
except TypeError:
raise NotImplementedError
return arrays, q.unit
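# Illustrative sketch (not executed here; ``u`` standing for astropy.units):
#     _quantities2arrays(1. * u.km, 1. * u.m)
#     # -> roughly ((1., 0.001), u.km): everything expressed in the unit of
#     # the first argument that carries an explicit unit.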
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs['out'] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim,
final_size) = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices,), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values,
unit_from_first=True)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode='constant', **kwargs):
# pad dispatches only on array, so that must be a Quantity.
for key in 'constant_values', 'end_values':
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple) else array._to_own_unit(v))
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
@function_helper(helps=({np.quantile, np.nanquantile}))
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop('out', None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
from astropy.units import Quantity
(a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
# Allow number without a unit as having the unit.
atol = Quantity(atol, unit).value
return (a, b, rtol, atol) + args, kwargs, None, None
@function_helper
def array_equal(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper
def array_equiv(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(helps={np.cross, np.inner, np.vdot, np.tensordot, np.kron,
np.correlate, np.convolve})
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs['out'] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs),
dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if density:
unit = (unit or 1) / a.unit
return ((a.value, bins, range), {'weights': weights, 'density': density},
(unit, a.unit), None)
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit)
for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if density:
unit = (unit or 1) / x.unit / y.unit
return ((x.value, y.value, bins, range),
{'weights': weights, 'density': density},
(unit, x.unit, y.unit), None)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
                'sample x.')
bins = [_check_bins(b, unit)
for (b, unit) in zip(bins, sample_units)]
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return ((sample, bins, range), {'weights': weights, 'density': density},
(unit, sample_units), None)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
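# Illustrative usage (editor's sketch; prepend/append are converted to a's
# unit before being passed on):
#     >>> import numpy as np, astropy.units as u
#     >>> np.diff(np.array([1., 3., 6.]) * u.m)
#     <Quantity [2., 3.] m>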
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get('axis', None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
return (f.value,) + varargs, kwargs, units, None
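# Illustrative usage (editor's sketch; the coordinate unit divides the
# function unit):
#     >>> import numpy as np, astropy.units as u
#     >>> np.gradient(np.array([0., 1., 4.]) * u.m, np.array([0., 1., 2.]) * u.s)
#     <Quantity [1., 2., 3.] m / s>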
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
if (not isinstance(start, LogQuantity) or
not isinstance(stop, LogQuantity)):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
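# Illustrative usage (editor's sketch; both endpoints must be logarithmic
# quantities, output spacing approximate):
#     >>> import numpy as np, astropy.units as u
#     >>> np.logspace(1. * u.dex(u.m), 3. * u.dex(u.m), 3)
#     <Quantity [  10.,  100., 1000.] m>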
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
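# Illustrative usage (editor's sketch; x is converted to xp's unit, and fp's
# unit is attached to the result):
#     >>> import numpy as np, astropy.units as u
#     >>> np.interp(50. * u.cm, np.array([0., 1.]) * u.m,
#     ...           np.array([0., 10.]) * u.K)
#     <Quantity 5. K>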
@function_helper
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
unit = ar.unit
n_index = sum(bool(i) for i in
(return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts,
axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
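# Illustrative usage (editor's sketch; the second array is converted to the
# first one's unit):
#     >>> import numpy as np, astropy.units as u
#     >>> np.union1d(np.array([1., 2.]) * u.m, 300. * u.cm)
#     <Quantity [1., 2., 3.] m>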
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
    # This tests whether ar1 is in ar2, so we should change the unit of
    # ar1 to that of ar2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is a Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
# Returning unit is None to signal nothing should happen to
# the output.
return val, None, None
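# Illustrative usage (editor's sketch; func operates on the Quantity itself,
# so the unit is preserved without further handling):
#     >>> import numpy as np, astropy.units as u
#     >>> np.apply_over_axes(np.sum, np.ones((2, 3)) * u.m, [0])
#     <Quantity [[2., 2., 2.]] m>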
@dispatched_function
def array_repr(arr, *args, **kwargs):
# TODO: The addition of "unit='...'" doesn't worry about line
# length. Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
cls_name = arr.__class__.__name__
fake_name = '_' * len(cls_name)
fake_cls = type(fake_name, (np.ndarray,), {})
no_unit = np.array_repr(arr.view(fake_cls),
*args, **kwargs).replace(fake_name, cls_name)
unit_part = f"unit='{arr.unit}'"
pre, dtype, post = no_unit.rpartition('dtype')
if dtype:
return f"{pre}{unit_part}, {dtype}{post}", None, None
else:
return f"{no_unit[:-1]}, {unit_part})", None, None
@dispatched_function
def array_str(arr, *args, **kwargs):
# TODO: The addition of the unit doesn't worry about line length.
# Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
no_unit = np.array_str(arr.value, *args, **kwargs)
return no_unit + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
# array2string breaks on quantities as it tries to turn individual
# items into float, which works only for dimensionless. Since the
# defaults would not keep any unit anyway, this is rather pointless -
# we're better off just passing on the array view. However, one can
# also work around this by passing on a formatter (as is done in Angle).
# So, we do nothing if the formatter argument is present and has the
# relevant formatter for our dtype.
formatter = args[6] if len(args) >= 7 else kwargs.get('formatter', None)
if formatter is None:
a = a.value
else:
# See whether it covers our dtype.
from numpy.core.arrayprint import _get_format_function
with np.printoptions(formatter=formatter) as options:
try:
ff = _get_format_function(a.value, **options)
except Exception:
# Shouldn't happen, but possibly we're just not being smart
# enough, so let's pass things on as is.
pass
else:
# If the selected format function is that of numpy, we know
# things will fail
if 'numpy' in ff.__module__:
a = a.value
return (a,) + args, kwargs, None, None
@function_helper
def diag(v, *args, **kwargs):
# Function works for *getting* the diagonal, but not *setting*.
# So, override always.
return (v.value,) + args, kwargs, v.unit, None
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
unit = a.unit
if compute_uv:
unit = (None, unit, None)
return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian),
{}, unit, None)
def _interpret_tol(tol, unit):
from astropy.units import Quantity
return Quantity(tol, unit).value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
if tol is not None:
tol = _interpret_tol(tol, M.unit)
return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
return (a.view(np.ndarray),)+args, kwargs, 1/a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
rcond = _interpret_tol(rcond, a.unit)
return (a.view(np.ndarray), rcond) + args, kwargs, 1/a.unit, None
@function_helper(module=np.linalg)
def det(a):
return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
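# Illustrative usage (editor's sketch; the determinant of an (n, n) matrix
# carries the unit to the n-th power):
#     >>> import numpy as np, astropy.units as u
#     >>> np.linalg.det(np.eye(3) * (2. * u.m))
#     <Quantity 8. m3>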
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
return ((a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs,
b.unit / a.unit, None)
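# Illustrative usage (editor's sketch; the solution has unit b.unit / a.unit):
#     >>> import numpy as np, astropy.units as u
#     >>> np.linalg.solve(np.eye(2) * u.s, np.array([2., 4.]) * u.m)
#     <Quantity [2., 4.] m / s>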
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
a, b = _as_quantities(a, b)
if rcond not in (None, "warn", -1):
rcond = _interpret_tol(rcond, a.unit)
return ((a.view(np.ndarray), b.view(np.ndarray), rcond), {},
(b.unit / a.unit, b.unit ** 2, None, a.unit), None)
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
if ord == 0:
from astropy.units import dimensionless_unscaled
unit = dimensionless_unscaled
else:
unit = x.unit
return (x.view(np.ndarray), ord)+args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
return (a.value, n), {}, a.unit ** n, None
@function_helper(module=np.linalg)
def cholesky(a):
return (a.value,), {}, a.unit ** 0.5, None
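# Illustrative usage (editor's sketch; the factor carries the square root of
# the unit, output layout approximate):
#     >>> import numpy as np, astropy.units as u
#     >>> np.linalg.cholesky(np.eye(2) * (4. * u.m ** 2))
#     <Quantity [[2., 0.],
#                [0., 2.]] m>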
@function_helper(module=np.linalg)
def qr(a, mode='reduced'):
if mode.startswith('e'):
units = None
elif mode == 'r':
units = a.unit
else:
from astropy.units import dimensionless_unscaled
units = (dimensionless_unscaled, a.unit)
return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
from astropy.units import dimensionless_unscaled
return (a.value,)+args, kwargs, (a.unit, dimensionless_unscaled), None
@function_helper(module=np.lib.recfunctions)
def structured_to_unstructured(arr, *args, **kwargs):
"""
Convert a structured quantity to an unstructured one.
This only works if all the units are compatible.
"""
from astropy.units import StructuredUnit
target_unit = arr.unit.values()[0]
def replace_unit(x):
if isinstance(x, StructuredUnit):
return x._recursively_apply(replace_unit)
else:
return target_unit
to_unit = arr.unit._recursively_apply(replace_unit)
return (arr.to_value(to_unit), ) + args, kwargs, target_unit, None
def _build_structured_unit(dtype, unit):
"""Build structured unit from dtype
Parameters
----------
dtype : `numpy.dtype`
unit : `astropy.units.Unit`
Returns
-------
`astropy.units.Unit` or tuple
"""
if dtype.fields is None:
return unit
return tuple(_build_structured_unit(v[0], unit) for v in dtype.fields.values())
@function_helper(module=np.lib.recfunctions)
def unstructured_to_structured(arr, dtype, *args, **kwargs):
from astropy.units import StructuredUnit
target_unit = StructuredUnit(_build_structured_unit(dtype, arr.unit))
return (arr.to_value(arr.unit), dtype) + args, kwargs, target_unit, None
|
a675cba587fc132d6dd486b76719d3da7f3d51d2712e8ee33e702473c542bcc0 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the scipy.special ufuncs.
Available ufuncs in this module are at
https://docs.scipy.org/doc/scipy/reference/special.html
"""
import numpy as np
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from . import UFUNC_HELPERS
from .helpers import (
get_converter, helper_cbrt, helper_dimensionless_to_dimensionless, helper_two_arg_dimensionless)
# ufuncs that require dimensionless input and give dimensionless output.
dimensionless_to_dimensionless_sps_ufuncs = (
'erf', 'erfc', 'erfcx', 'erfi', 'erfinv', 'erfcinv',
'gamma', 'gammaln', 'loggamma', 'gammasgn', 'psi', 'rgamma', 'digamma',
'wofz', 'dawsn', 'entr', 'exprel', 'expm1', 'log1p', 'exp2', 'exp10',
'j0', 'j1', 'y0', 'y1', 'i0', 'i0e', 'i1', 'i1e',
'k0', 'k0e', 'k1', 'k1e', 'itj0y0', 'it2j0y0', 'iti0k0', 'it2i0k0',
'ndtr', 'ndtri')
scipy_special_ufuncs = dimensionless_to_dimensionless_sps_ufuncs
# ufuncs that require input in degrees and give dimensionless output.
degree_to_dimensionless_sps_ufuncs = ('cosdg', 'sindg', 'tandg', 'cotdg')
scipy_special_ufuncs += degree_to_dimensionless_sps_ufuncs
# ufuncs that require 2 dimensionless inputs and give dimensionless output.
# note: 'jv' and 'jn' are aliases in some scipy versions, which will
# cause the same key to be written twice, but since both are handled by the
# same helper there is no harm done.
two_arg_dimensionless_sps_ufuncs = (
'jv', 'jn', 'jve', 'yn', 'yv', 'yve', 'kn', 'kv', 'kve', 'iv', 'ive',
'hankel1', 'hankel1e', 'hankel2', 'hankel2e')
scipy_special_ufuncs += two_arg_dimensionless_sps_ufuncs
# ufuncs handled as special cases
scipy_special_ufuncs += ('cbrt', 'radian')
def helper_degree_to_dimensionless(f, unit):
from astropy.units.si import degree
try:
return [get_converter(unit, degree)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_degree_minute_second_to_radian(f, unit1, unit2, unit3):
from astropy.units.si import arcmin, arcsec, degree, radian
try:
return [get_converter(unit1, degree),
get_converter(unit2, arcmin),
get_converter(unit3, arcsec)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def get_scipy_special_helpers():
import scipy.special as sps
SCIPY_HELPERS = {}
for name in dimensionless_to_dimensionless_sps_ufuncs:
# In SCIPY_LT_1_5, erfinv and erfcinv are not ufuncs.
ufunc = getattr(sps, name, None)
if isinstance(ufunc, np.ufunc):
SCIPY_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
for ufunc in degree_to_dimensionless_sps_ufuncs:
SCIPY_HELPERS[getattr(sps, ufunc)] = helper_degree_to_dimensionless
for ufunc in two_arg_dimensionless_sps_ufuncs:
SCIPY_HELPERS[getattr(sps, ufunc)] = helper_two_arg_dimensionless
# ufuncs handled as special cases
SCIPY_HELPERS[sps.cbrt] = helper_cbrt
SCIPY_HELPERS[sps.radian] = helper_degree_minute_second_to_radian
return SCIPY_HELPERS
UFUNC_HELPERS.register_module('scipy.special', scipy_special_ufuncs,
get_scipy_special_helpers)
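# Illustrative usage (editor's sketch; once the helpers are registered,
# scipy.special ufuncs accept dimensionless Quantities, and the degree-based
# ones accept angles):
#     >>> import astropy.units as u
#     >>> from scipy.special import erf, sindg
#     >>> erf(0. * u.dimensionless_unscaled)
#     <Quantity 0.>
#     >>> sindg(90. * u.deg)
#     <Quantity 1.>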
|
ac7ecae01643733f4ed4506f8f08f9668a79acdefee6af4ccbbd7b4875f47c64 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Separate tests specifically for equivalencies."""
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_allclose
# LOCAL
from astropy import constants
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units.equivalencies import Equivalency
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_dimensionless_angles():
    # test that the dimensionless_angles equivalency allows one to change
    # the unit by any power of radian (#1161)
rad1 = u.dimensionless_angles()
assert u.radian.to(1, equivalencies=rad1) == 1.
assert u.deg.to(1, equivalencies=rad1) == u.deg.to(u.rad)
assert u.steradian.to(1, equivalencies=rad1) == 1.
assert u.dimensionless_unscaled.to(u.steradian, equivalencies=rad1) == 1.
# now quantities
assert (1.*u.radian).to_value(1, equivalencies=rad1) == 1.
assert (1.*u.deg).to_value(1, equivalencies=rad1) == u.deg.to(u.rad)
assert (1.*u.steradian).to_value(1, equivalencies=rad1) == 1.
# more complicated example
I = 1.e45 * u.g * u.cm**2 # noqa
Omega = u.cycle / (1.*u.s)
Erot = 0.5 * I * Omega**2
# check that equivalency makes this work
Erot_in_erg1 = Erot.to(u.erg, equivalencies=rad1)
# and check that value is correct
assert_allclose(Erot_in_erg1.value, (Erot/u.radian**2).to_value(u.erg))
# test built-in equivalency in subclass
class MyRad1(u.Quantity):
_equivalencies = rad1
phase = MyRad1(1., u.cycle)
assert phase.to_value(1) == u.cycle.to(u.radian)
@pytest.mark.parametrize('log_unit', (u.mag, u.dex, u.dB))
def test_logarithmic(log_unit):
# check conversion of mag, dB, and dex to dimensionless and vice versa
with pytest.raises(u.UnitsError):
log_unit.to(1, 0.)
with pytest.raises(u.UnitsError):
u.dimensionless_unscaled.to(log_unit)
assert log_unit.to(1, 0., equivalencies=u.logarithmic()) == 1.
assert u.dimensionless_unscaled.to(log_unit,
equivalencies=u.logarithmic()) == 0.
# also try with quantities
q_dex = np.array([0., -1., 1., 2.]) * u.dex
q_expected = 10.**q_dex.value * u.dimensionless_unscaled
q_log_unit = q_dex.to(log_unit)
assert np.all(q_log_unit.to(1, equivalencies=u.logarithmic()) ==
q_expected)
assert np.all(q_expected.to(log_unit, equivalencies=u.logarithmic()) ==
q_log_unit)
with u.set_enabled_equivalencies(u.logarithmic()):
assert np.all(np.abs(q_log_unit - q_expected.to(log_unit)) <
1.e-10*log_unit)
doppler_functions = [u.doppler_optical, u.doppler_radio, u.doppler_relativistic]
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_frequency_0(function):
rest = 105.01 * u.GHz
velo0 = rest.to(u.km/u.s, equivalencies=function(rest))
assert velo0.value == 0
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_wavelength_0(function):
rest = 105.01 * u.GHz
q1 = 0.00285489437196 * u.m
velo0 = q1.to(u.km/u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_energy_0(function):
rest = 105.01 * u.GHz
q1 = 0.0004342864648539744 * u.eV
velo0 = q1.to(u.km/u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_frequency_circle(function):
rest = 105.01 * u.GHz
shifted = 105.03 * u.GHz
velo = shifted.to(u.km/u.s, equivalencies=function(rest))
freq = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(freq.value, shifted.value, decimal=7)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_wavelength_circle(function):
rest = 105.01 * u.nm
shifted = 105.03 * u.nm
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
wav = velo.to(u.nm, equivalencies=function(rest))
np.testing.assert_almost_equal(wav.value, shifted.value, decimal=7)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_energy_circle(function):
rest = 1.0501 * u.eV
shifted = 1.0503 * u.eV
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
en = velo.to(u.eV, equivalencies=function(rest))
np.testing.assert_almost_equal(en.value, shifted.value, decimal=7)
values_ghz = (999.899940784289, 999.8999307714406, 999.8999357778647)
@pytest.mark.parametrize(('function', 'value'),
list(zip(doppler_functions, values_ghz)))
def test_30kms(function, value):
rest = 1000 * u.GHz
velo = 30 * u.km/u.s
shifted = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(shifted.value, value, decimal=7)
bad_values = (5, 5*u.Jy, None)
@pytest.mark.parametrize(('function', 'value'),
list(zip(doppler_functions, bad_values)))
def test_bad_restfreqs(function, value):
with pytest.raises(u.UnitsError):
function(value)
@pytest.mark.parametrize(('z', 'rv_ans'),
[(0, 0 * (u.km / u.s)),
(0.001, 299642.56184583 * (u.m / u.s)),
(-1, -2.99792458e8 * (u.m / u.s))])
def test_doppler_redshift(z, rv_ans):
z_in = z * u.dimensionless_unscaled
rv_out = z_in.to(u.km / u.s, u.doppler_redshift())
z_out = rv_out.to(u.dimensionless_unscaled, u.doppler_redshift())
assert_quantity_allclose(rv_out, rv_ans)
assert_quantity_allclose(z_out, z_in) # Check roundtrip
def test_doppler_redshift_no_cosmology():
from astropy.cosmology.units import redshift
with pytest.raises(u.UnitConversionError, match='not convertible'):
(0 * (u.km / u.s)).to(redshift, u.doppler_redshift())
def test_massenergy():
# The relative tolerance of these tests is set by the uncertainties
# in the charge of the electron, which is known to about
# 3e-9 (relative tolerance). Therefore, we limit the
# precision of the tests to 1e-7 to be safe. The masses are
# (loosely) known to ~ 5e-8 rel tolerance, so we couldn't test to
# 1e-7 if we used the values from astropy.constants; that is,
# they might change by more than 1e-7 in some future update, so instead
# they are hardwired here.
# Electron, proton, neutron, muon, 1g
mass_eV = u.Quantity([510.998928e3, 938.272046e6, 939.565378e6,
105.6583715e6, 5.60958884539e32], u.eV)
mass_g = u.Quantity([9.10938291e-28, 1.672621777e-24, 1.674927351e-24,
1.88353147e-25, 1], u.g)
# Test both ways
assert np.allclose(mass_eV.to_value(u.g, equivalencies=u.mass_energy()),
mass_g.value, rtol=1e-7)
assert np.allclose(mass_g.to_value(u.eV, equivalencies=u.mass_energy()),
mass_eV.value, rtol=1e-7)
# Basic tests of 'derived' equivalencies
# Surface density
sdens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**2)
sdens_g = u.Quantity(1e-4, u.g / u.cm**2)
assert np.allclose(sdens_eV.to_value(u.g / u.cm**2,
equivalencies=u.mass_energy()),
sdens_g.value, rtol=1e-7)
assert np.allclose(sdens_g.to_value(u.eV / u.m**2,
equivalencies=u.mass_energy()),
sdens_eV.value, rtol=1e-7)
# Density
dens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**3)
dens_g = u.Quantity(1e-6, u.g / u.cm**3)
assert np.allclose(dens_eV.to_value(u.g / u.cm**3,
equivalencies=u.mass_energy()),
dens_g.value, rtol=1e-7)
assert np.allclose(dens_g.to_value(u.eV / u.m**3,
equivalencies=u.mass_energy()),
dens_eV.value, rtol=1e-7)
# Power
pow_eV = u.Quantity(5.60958884539e32, u.eV / u.s)
pow_g = u.Quantity(1, u.g / u.s)
assert np.allclose(pow_eV.to_value(u.g / u.s,
equivalencies=u.mass_energy()),
pow_g.value, rtol=1e-7)
assert np.allclose(pow_g.to_value(u.eV / u.s,
equivalencies=u.mass_energy()),
pow_eV.value, rtol=1e-7)
def test_is_equivalent():
assert u.m.is_equivalent(u.pc)
assert u.cycle.is_equivalent(u.mas)
assert not u.cycle.is_equivalent(u.dimensionless_unscaled)
assert u.cycle.is_equivalent(u.dimensionless_unscaled,
u.dimensionless_angles())
assert not (u.Hz.is_equivalent(u.J))
assert u.Hz.is_equivalent(u.J, u.spectral())
assert u.J.is_equivalent(u.Hz, u.spectral())
assert u.pc.is_equivalent(u.arcsecond, u.parallax())
assert u.arcminute.is_equivalent(u.au, u.parallax())
# Pass a tuple for multiple possibilities
assert u.cm.is_equivalent((u.m, u.s, u.kg))
assert u.ms.is_equivalent((u.m, u.s, u.kg))
assert u.g.is_equivalent((u.m, u.s, u.kg))
assert not u.L.is_equivalent((u.m, u.s, u.kg))
assert not (u.km / u.s).is_equivalent((u.m, u.s, u.kg))
def test_parallax():
a = u.arcsecond.to(u.pc, 10, u.parallax())
assert_allclose(a, 0.10, rtol=1.e-12)
b = u.pc.to(u.arcsecond, a, u.parallax())
assert_allclose(b, 10, rtol=1.e-12)
a = u.arcminute.to(u.au, 1, u.parallax())
assert_allclose(a, 3437.746770785, rtol=1.e-12)
b = u.au.to(u.arcminute, a, u.parallax())
assert_allclose(b, 1, rtol=1.e-12)
val = (-1 * u.mas).to(u.pc, u.parallax())
assert np.isnan(val.value)
val = (-1 * u.mas).to_value(u.pc, u.parallax())
assert np.isnan(val)
def test_parallax2():
a = u.arcsecond.to(u.pc, [0.1, 2.5], u.parallax())
assert_allclose(a, [10, 0.4], rtol=1.e-12)
def test_spectral():
a = u.AA.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+18)
b = u.Hz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.AA.to(u.MHz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+12)
b = u.MHz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.m.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+8)
b = u.Hz.to(u.m, a, u.spectral())
assert_allclose(b, 1)
def test_spectral2():
a = u.nm.to(u.J, 500, u.spectral())
assert_allclose(a, 3.972891366538605e-19)
b = u.J.to(u.nm, a, u.spectral())
assert_allclose(b, 500)
a = u.AA.to(u.Hz, 1, u.spectral())
b = u.Hz.to(u.J, a, u.spectral())
c = u.AA.to(u.J, 1, u.spectral())
assert_allclose(b, c)
c = u.J.to(u.Hz, b, u.spectral())
assert_allclose(a, c)
def test_spectral3():
a = u.nm.to(u.Hz, [1000, 2000], u.spectral())
assert_allclose(a, [2.99792458e+14, 1.49896229e+14])
@pytest.mark.parametrize(
('in_val', 'in_unit'),
[([0.1, 5000.0, 10000.0], u.AA),
([1e+5, 2.0, 1.0], u.micron ** -1),
([2.99792458e+19, 5.99584916e+14, 2.99792458e+14], u.Hz),
([1.98644568e-14, 3.97289137e-19, 1.98644568e-19], u.J)])
def test_spectral4(in_val, in_unit):
"""Wave number conversion w.r.t. wavelength, freq, and energy."""
# Spectroscopic and angular
out_units = [u.micron ** -1, u.radian / u.micron]
answers = [[1e+5, 2.0, 1.0], [6.28318531e+05, 12.5663706, 6.28318531]]
for out_unit, ans in zip(out_units, answers):
# Forward
a = in_unit.to(out_unit, in_val, u.spectral())
assert_allclose(a, ans)
# Backward
b = out_unit.to(in_unit, ans, u.spectral())
assert_allclose(b, in_val)
@pytest.mark.parametrize('wav', (3500 * u.AA,
8.5654988e+14 * u.Hz,
1 / (3500 * u.AA),
5.67555959e-19 * u.J))
def test_spectraldensity2(wav):
# flux density
flambda = u.erg / u.angstrom / u.cm ** 2 / u.s
fnu = u.erg / u.Hz / u.cm ** 2 / u.s
a = flambda.to(fnu, 1, u.spectral_density(wav))
assert_allclose(a, 4.086160166177361e-12)
# integrated flux
f_int = u.erg / u.cm ** 2 / u.s
phot_int = u.ph / u.cm ** 2 / u.s
a = f_int.to(phot_int, 1, u.spectral_density(wav))
assert_allclose(a, 1.7619408e+11)
a = phot_int.to(f_int, 1, u.spectral_density(wav))
assert_allclose(a, 5.67555959e-12)
# luminosity density
llambda = u.erg / u.angstrom / u.s
lnu = u.erg / u.Hz / u.s
a = llambda.to(lnu, 1, u.spectral_density(wav))
assert_allclose(a, 4.086160166177361e-12)
a = lnu.to(llambda, 1, u.spectral_density(wav))
assert_allclose(a, 2.44728537142857e11)
def test_spectraldensity3():
# Define F_nu in Jy
f_nu = u.Jy
# Define F_lambda in ergs / cm^2 / s / micron
f_lambda = u.erg / u.cm ** 2 / u.s / u.micron
# 1 GHz
one_ghz = u.Quantity(1, u.GHz)
# Convert to ergs / cm^2 / s / Hz
assert_allclose(f_nu.to(u.erg / u.cm ** 2 / u.s / u.Hz, 1.), 1.e-23, 10)
# Convert to ergs / cm^2 / s at 10 Ghz
assert_allclose(f_nu.to(u.erg / u.cm ** 2 / u.s, 1.,
equivalencies=u.spectral_density(one_ghz * 10)),
1.e-13, 10)
# Convert to F_lambda at 1 Ghz
assert_allclose(f_nu.to(f_lambda, 1.,
equivalencies=u.spectral_density(one_ghz)),
3.335640951981521e-20, 10)
# Convert to Jy at 1 Ghz
assert_allclose(f_lambda.to(u.Jy, 1.,
equivalencies=u.spectral_density(one_ghz)),
1. / 3.335640951981521e-20, 10)
# Convert to ergs / cm^2 / s at 10 microns
assert_allclose(f_lambda.to(u.erg / u.cm ** 2 / u.s, 1.,
equivalencies=u.spectral_density(u.Quantity(10, u.micron))),
10., 10)
def test_spectraldensity4():
"""PHOTLAM and PHOTNU conversions."""
flam = u.erg / (u.cm ** 2 * u.s * u.AA)
fnu = u.erg / (u.cm ** 2 * u.s * u.Hz)
photlam = u.photon / (u.cm ** 2 * u.s * u.AA)
photnu = u.photon / (u.cm ** 2 * u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_photlam = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_photnu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
flux_jy = [3.20735792e-2, 3.29903646e-2, 3.21727226e-2]
flux_stmag = [12.41858665, 12.38919182, 12.41764379]
flux_abmag = [12.63463143, 12.60403221, 12.63128047]
# PHOTLAM <--> FLAM
assert_allclose(photlam.to(
flam, flux_photlam, u.spectral_density(wave)), flux_flam, rtol=1e-6)
assert_allclose(flam.to(
photlam, flux_flam, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> FNU
assert_allclose(photlam.to(
fnu, flux_photlam, u.spectral_density(wave)), flux_fnu, rtol=1e-6)
assert_allclose(fnu.to(
photlam, flux_fnu, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> Jy
assert_allclose(photlam.to(
u.Jy, flux_photlam, u.spectral_density(wave)), flux_jy, rtol=1e-6)
assert_allclose(u.Jy.to(
photlam, flux_jy, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> PHOTNU
assert_allclose(photlam.to(
photnu, flux_photlam, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
assert_allclose(photnu.to(
photlam, flux_photnu, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTNU <--> FNU
assert_allclose(photnu.to(
fnu, flux_photnu, u.spectral_density(wave)), flux_fnu, rtol=1e-6)
assert_allclose(fnu.to(
photnu, flux_fnu, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
# PHOTNU <--> FLAM
assert_allclose(photnu.to(
flam, flux_photnu, u.spectral_density(wave)), flux_flam, rtol=1e-6)
assert_allclose(flam.to(
photnu, flux_flam, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
# PHOTLAM <--> STMAG
assert_allclose(photlam.to(
u.STmag, flux_photlam, u.spectral_density(wave)), flux_stmag, rtol=1e-6)
assert_allclose(u.STmag.to(
photlam, flux_stmag, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> ABMAG
assert_allclose(photlam.to(
u.ABmag, flux_photlam, u.spectral_density(wave)), flux_abmag, rtol=1e-6)
assert_allclose(u.ABmag.to(
photlam, flux_abmag, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
def test_spectraldensity5():
""" Test photon luminosity density conversions. """
L_la = u.erg / (u.s * u.AA)
L_nu = u.erg / (u.s * u.Hz)
phot_L_la = u.photon / (u.s * u.AA)
phot_L_nu = u.photon / (u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_phot_L_la = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_phot_L_nu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_L_la = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_L_nu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
# PHOTLAM <--> FLAM
assert_allclose(phot_L_la.to(
L_la, flux_phot_L_la, u.spectral_density(wave)), flux_L_la, rtol=1e-6)
assert_allclose(L_la.to(
phot_L_la, flux_L_la, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTLAM <--> FNU
assert_allclose(phot_L_la.to(
L_nu, flux_phot_L_la, u.spectral_density(wave)), flux_L_nu, rtol=1e-6)
assert_allclose(L_nu.to(
phot_L_la, flux_L_nu, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTLAM <--> PHOTNU
assert_allclose(phot_L_la.to(
phot_L_nu, flux_phot_L_la, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
assert_allclose(phot_L_nu.to(
phot_L_la, flux_phot_L_nu, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTNU <--> FNU
assert_allclose(phot_L_nu.to(
L_nu, flux_phot_L_nu, u.spectral_density(wave)), flux_L_nu, rtol=1e-6)
assert_allclose(L_nu.to(
phot_L_nu, flux_L_nu, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
# PHOTNU <--> FLAM
assert_allclose(phot_L_nu.to(
L_la, flux_phot_L_nu, u.spectral_density(wave)), flux_L_la, rtol=1e-6)
assert_allclose(L_la.to(
phot_L_nu, flux_L_la, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
def test_spectraldensity6():
""" Test surface brightness conversions. """
slam = u.erg / (u.cm ** 2 * u.s * u.AA * u.sr)
snu = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
sb_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
sb_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
# S(nu) <--> S(lambda)
assert_allclose(snu.to(
slam, sb_fnu, u.spectral_density(wave)), sb_flam, rtol=1e-6)
assert_allclose(slam.to(
snu, sb_flam, u.spectral_density(wave)), sb_fnu, rtol=1e-6)
@pytest.mark.parametrize(
('from_unit', 'to_unit'),
[(u.ph / u.cm ** 2 / u.s, (u.cm * u.cm * u.s) ** -1),
(u.ph / u.cm ** 2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV)),
(u.erg / u.cm ** 2 / u.s, (u.cm * u.cm * u.s) ** -1),
(u.erg / u.cm ** 2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV))])
def test_spectraldensity_not_allowed(from_unit, to_unit):
"""Not allowed to succeed as
per https://github.com/astropy/astropy/pull/10015
"""
with pytest.raises(u.UnitConversionError, match='not convertible'):
from_unit.to(to_unit, 1, u.spectral_density(1 * u.AA))
# The other way
with pytest.raises(u.UnitConversionError, match='not convertible'):
to_unit.to(from_unit, 1, u.spectral_density(1 * u.AA))
def test_equivalent_units():
from astropy.units import imperial
with u.add_enabled_units(imperial):
units = u.g.find_equivalent_units()
units_set = set(units)
match = set(
[u.M_e, u.M_p, u.g, u.kg, u.solMass, u.t, u.u, u.M_earth,
u.M_jup, imperial.oz, imperial.lb, imperial.st, imperial.ton,
imperial.slug])
assert units_set == match
r = repr(units)
assert r.count('\n') == len(units) + 2
def test_equivalent_units2():
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = set(
[u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad])
assert units == match
from astropy.units import imperial
with u.add_enabled_units(imperial):
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = set(
[u.AU, u.Angstrom, imperial.BTU, u.Hz, u.J, u.Ry,
imperial.cal, u.cm, u.eV, u.erg, imperial.ft, imperial.fur,
imperial.inch, imperial.kcal, u.lyr, u.m, imperial.mi, u.lsec,
imperial.mil, u.micron, u.pc, u.solRad, imperial.yd, u.Bq, u.Ci,
imperial.nmi, u.k, u.earthRad, u.jupiterRad])
assert units == match
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = set(
[u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad])
assert units == match
def test_trivial_equivalency():
assert u.m.to(u.kg, equivalencies=[(u.m, u.kg)]) == 1.0
def test_invalid_equivalency():
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m,)])
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m, 5.0)])
def test_irrelevant_equivalency():
with pytest.raises(u.UnitsError):
u.m.to(u.kg, equivalencies=[(u.m, u.l)])
def test_brightness_temperature():
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
np.testing.assert_almost_equal(
tb.value, (1 * u.Jy).to_value(
u.K, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)))
np.testing.assert_almost_equal(
1.0, tb.to_value(
u.Jy, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)))
def test_swapped_args_brightness_temperature():
"""
#5173 changes the order of arguments but accepts the old (deprecated) args
"""
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
with pytest.warns(AstropyDeprecationWarning) as w:
result = (1*u.Jy).to(
u.K, equivalencies=u.brightness_temperature(omega_B, nu))
roundtrip = result.to(
u.Jy, equivalencies=u.brightness_temperature(omega_B, nu))
assert len(w) == 2
np.testing.assert_almost_equal(tb.value, result.value)
np.testing.assert_almost_equal(roundtrip.value, 1)
def test_surfacebrightness():
sb = 50*u.MJy/u.sr
k = sb.to(u.K, u.brightness_temperature(50*u.GHz))
np.testing.assert_almost_equal(k.value, 0.650965, 5)
assert k.unit.is_equivalent(u.K)
def test_beam():
    # pick a beam area: 2 pi r^2 = area of a Gaussian with sigma=50 arcsec
omega_B = 2 * np.pi * (50 * u.arcsec) ** 2
new_beam = (5*u.beam).to(u.sr, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(omega_B.to(u.sr).value * 5, new_beam.value)
assert new_beam.unit.is_equivalent(u.sr)
# make sure that it's still consistent with 5 beams
nbeams = new_beam.to(u.beam, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(nbeams.value, 5)
# test inverse beam equivalency
# (this is just a sanity check that the equivalency is defined;
# it's not for testing numerical consistency)
(5/u.beam).to(1/u.sr, u.equivalencies.beam_angular_area(omega_B))
# test practical case
# (this is by far the most important one)
flux_density = (5*u.Jy/u.beam).to(u.MJy/u.sr, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(flux_density.value, 13.5425483146382)
def test_thermodynamic_temperature():
nu = 143 * u.GHz
tb = 0.0026320501262630277 * u.K
eq = u.thermodynamic_temperature(nu, T_cmb=2.7255 * u.K)
np.testing.assert_almost_equal(
tb.value, (1 * (u.MJy / u.sr)).to_value(u.K, equivalencies=eq))
np.testing.assert_almost_equal(
1.0, tb.to_value(u.MJy / u.sr, equivalencies=eq))
def test_equivalency_context():
with u.set_enabled_equivalencies(u.dimensionless_angles()):
phase = u.Quantity(1., u.cycle)
assert_allclose(np.exp(1j*phase), 1.)
Omega = u.cycle / (1.*u.minute)
assert_allclose(np.exp(1j*Omega*60.*u.second), 1.)
# ensure we can turn off equivalencies even within the scope
with pytest.raises(u.UnitsError):
phase.to(1, equivalencies=None)
# test the manager also works in the Quantity constructor.
q1 = u.Quantity(phase, u.dimensionless_unscaled)
assert_allclose(q1.value, u.cycle.to(u.radian))
# and also if we use a class that happens to have a unit attribute.
class MyQuantityLookalike(np.ndarray):
pass
mylookalike = np.array(1.).view(MyQuantityLookalike)
mylookalike.unit = 'cycle'
# test the manager also works in the Quantity constructor.
q2 = u.Quantity(mylookalike, u.dimensionless_unscaled)
assert_allclose(q2.value, u.cycle.to(u.radian))
with u.set_enabled_equivalencies(u.spectral()):
u.GHz.to(u.cm)
eq_on = u.GHz.find_equivalent_units()
with pytest.raises(u.UnitsError):
u.GHz.to(u.cm, equivalencies=None)
# without equivalencies, we should find a smaller (sub)set
eq_off = u.GHz.find_equivalent_units()
assert all(eq in set(eq_on) for eq in eq_off)
assert set(eq_off) < set(eq_on)
# Check the equivalency manager also works in ufunc evaluations,
# not just using (wrong) scaling. [#2496]
l2v = u.doppler_optical(6000 * u.angstrom)
l1 = 6010 * u.angstrom
assert l1.to(u.km/u.s, equivalencies=l2v) > 100. * u.km / u.s
with u.set_enabled_equivalencies(l2v):
assert l1 > 100. * u.km / u.s
assert abs((l1 - 500. * u.km / u.s).to(u.angstrom)) < 1. * u.km/u.s
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
def just_to_from_units(equivalencies):
return [(equiv[0], equiv[1]) for equiv in equivalencies]
tf_dimensionless_angles = just_to_from_units(u.dimensionless_angles())
tf_spectral = just_to_from_units(u.spectral())
# <=1 b/c might have the dimensionless_redshift equivalency enabled.
assert len(base_registry.equivalencies) <= 1
with u.set_enabled_equivalencies(u.dimensionless_angles()):
new_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(new_registry.equivalencies)) ==
set(tf_dimensionless_angles))
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.set_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(newer_registry.equivalencies)) ==
set(tf_spectral))
assert (set(newer_registry.all_units) ==
set(base_registry.all_units))
assert (set(just_to_from_units(new_registry.equivalencies)) ==
set(tf_dimensionless_angles))
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.add_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(newer_registry.equivalencies)) ==
set(tf_dimensionless_angles) | set(tf_spectral))
assert (set(newer_registry.all_units) ==
set(base_registry.all_units))
assert base_registry is u.get_current_unit_registry()
def test_temperature():
from astropy.units.imperial import deg_F, deg_R
t_k = 0 * u.K
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -273.15)
assert_allclose(t_k.to_value(deg_F, u.temperature()), -459.67)
t_k = 20 * u.K
assert_allclose(t_k.to_value(deg_R, u.temperature()), 36.0)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(u.K, u.temperature()), 11.11, atol=0.01)
t_k = 20 * deg_F
assert_allclose(t_k.to_value(deg_R, u.temperature()), 479.67)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(deg_F, u.temperature()), -439.67)
t_k = 20 * u.deg_C
assert_allclose(t_k.to_value(deg_R, u.temperature()), 527.67)
t_k = 20 * deg_R
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -262.039, atol=0.01)
def test_temperature_energy():
x = 1000 * u.K
y = (x * constants.k_B).to(u.keV)
assert_allclose(x.to_value(u.keV, u.temperature_energy()), y.value)
assert_allclose(y.to_value(u.K, u.temperature_energy()), x.value)
def test_molar_mass_amu():
x = 1 * (u.g/u.mol)
y = 1 * u.u
assert_allclose(x.to_value(u.u, u.molar_mass_amu()), y.value)
assert_allclose(y.to_value(u.g/u.mol, u.molar_mass_amu()), x.value)
with pytest.raises(u.UnitsError):
x.to(u.u)
def test_compose_equivalencies():
x = u.Unit("arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.pc
x = u.Unit("2 arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.Unit(0.5 * u.pc)
x = u.degree.compose(equivalencies=u.dimensionless_angles())
assert u.Unit(u.degree.to(u.radian)) in x
x = (u.nm).compose(units=(u.m, u.s), equivalencies=u.doppler_optical(0.55*u.micron))
for y in x:
if y.bases == [u.m, u.s]:
assert y.powers == [1, -1]
assert_allclose(
y.scale,
u.nm.to(u.m / u.s, equivalencies=u.doppler_optical(0.55 * u.micron)))
break
else:
assert False, "Didn't find speed in compose results"
def test_pixel_scale():
pix = 75*u.pix
asec = 30*u.arcsec
pixscale = 0.4*u.arcsec/u.pix
pixscale2 = 2.5*u.pix/u.arcsec
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_pixel_scale_invalid_scale_unit():
pixscale = 0.4 * u.arcsec
pixscale2 = 0.4 * u.arcsec / u.pix ** 2
with pytest.raises(u.UnitsError, match="pixel dimension"):
u.pixel_scale(pixscale)
with pytest.raises(u.UnitsError, match="pixel dimension"):
u.pixel_scale(pixscale2)
def test_pixel_scale_acceptable_scale_unit():
pix = 75 * u.pix
v = 3000 * (u.cm / u.s)
pixscale = 0.4 * (u.m / u.s / u.pix)
pixscale2 = 2.5 * (u.pix / (u.m / u.s))
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_plate_scale():
mm = 1.5*u.mm
asec = 30*u.arcsec
platescale = 20*u.arcsec/u.mm
platescale2 = 0.05*u.mm/u.arcsec
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale)), mm)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale2)), mm)
def test_equivalency():
ps = u.pixel_scale(10*u.arcsec/u.pix)
assert isinstance(ps, Equivalency)
assert isinstance(ps.name, list)
assert len(ps.name) == 1
assert ps.name[0] == "pixel_scale"
assert isinstance(ps.kwargs, list)
assert len(ps.kwargs) == 1
assert ps.kwargs[0] == dict({'pixscale': 10*u.arcsec/u.pix})
def test_add_equivalencies():
e1 = u.pixel_scale(10*u.arcsec/u.pixel) + u.temperature_energy()
assert isinstance(e1, Equivalency)
assert e1.name == ["pixel_scale", "temperature_energy"]
assert isinstance(e1.kwargs, list)
assert e1.kwargs == [dict({'pixscale': 10*u.arcsec/u.pix}), dict()]
e2 = u.pixel_scale(10*u.arcsec/u.pixel) + [1, 2, 3]
assert isinstance(e2, list)
def test_pprint():
pprint_class = u.UnitBase.EquivalentUnitsList
equiv_units_to_Hz = u.Hz.find_equivalent_units()
assert pprint_class.__repr__(equiv_units_to_Hz).splitlines() == [
' Primary name | Unit definition | Aliases ',
'[',
' Bq | 1 / s | becquerel ,',
' Ci | 3.7e+10 / s | curie ,',
' Hz | 1 / s | Hertz, hertz ,',
']'
]
assert pprint_class._repr_html_(equiv_units_to_Hz) == (
'<table style="width:50%">'
'<tr><th>Primary name</th><th>Unit definition</th>'
'<th>Aliases</th></tr>'
'<tr><td>Bq</td><td>1 / s</td><td>becquerel</td></tr>'
'<tr><td>Ci</td><td>3.7e+10 / s</td><td>curie</td></tr>'
'<tr><td>Hz</td><td>1 / s</td><td>Hertz, hertz</td></tr></table>'
)
|
bb1b8f9a18f0cc06aae3f984f6243b55311d671d0226137c8735785a4e8d7dd5 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test setting and adding unit aliases."""
import pytest
import astropy.units as u
trials = [
({"Angstroms": u.AA}, "Angstroms", u.AA),
({"counts": u.count}, "counts/s", u.count / u.s),
({"ergs": u.erg, "Angstroms": u.AA},
"ergs/(s cm**2 Angstroms)", u.erg / (u.s * u.cm**2 * u.AA))]
class TestAliases:
def teardown_method(self):
u.set_enabled_aliases({})
def teardown_class(self):
assert u.get_current_unit_registry().aliases == {}
@pytest.mark.parametrize('format_', [None, 'fits', 'ogip', 'vounit', 'cds'])
@pytest.mark.parametrize('aliases,bad,unit', trials)
def test_set_enabled_aliases_context_manager(self, aliases, bad, unit, format_):
if format_ == 'cds':
bad = bad.replace(' ', '.').replace('**', '')
with u.set_enabled_aliases(aliases):
assert u.get_current_unit_registry().aliases == aliases
assert u.Unit(bad) == unit
assert u.get_current_unit_registry().aliases == {}
with pytest.raises(ValueError):
u.Unit(bad)
@pytest.mark.parametrize('aliases,bad,unit', trials)
def test_add_enabled_aliases_context_manager(self, aliases, bad, unit):
with u.add_enabled_aliases(aliases):
assert u.get_current_unit_registry().aliases == aliases
assert u.Unit(bad) == unit
assert u.get_current_unit_registry().aliases == {}
with pytest.raises(ValueError):
u.Unit(bad)
def test_set_enabled_aliases(self):
for i, (aliases, bad, unit) in enumerate(trials):
u.set_enabled_aliases(aliases)
assert u.get_current_unit_registry().aliases == aliases
assert u.Unit(bad) == unit
for _, bad2, unit2 in trials:
if bad2 == bad or bad2 in aliases:
assert u.Unit(bad2) == unit2
else:
with pytest.raises(ValueError):
u.Unit(bad2)
def test_add_enabled_aliases(self):
expected_aliases = {}
for i, (aliases, bad, unit) in enumerate(trials):
u.add_enabled_aliases(aliases)
expected_aliases.update(aliases)
assert u.get_current_unit_registry().aliases == expected_aliases
assert u.Unit(bad) == unit
for j, (_, bad2, unit2) in enumerate(trials):
if j <= i:
assert u.Unit(bad2) == unit2
else:
with pytest.raises(ValueError):
u.Unit(bad2)
def test_cannot_alias_existing_unit(self):
with pytest.raises(ValueError, match='already means'):
u.set_enabled_aliases({'pct': u.Unit(1e-12*u.count)})
def test_cannot_alias_existing_alias_to_another_unit(self):
u.set_enabled_aliases({'counts': u.count})
with pytest.raises(ValueError, match='already is an alias'):
u.add_enabled_aliases({'counts': u.adu})
|
9b4122847abfcb1f646aef17bc4cdbc744a987f7d320187dff58d5a8bfcbdda1 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test utilities for `astropy.units`.
"""
import numpy as np
from numpy import finfo
from astropy.units.quantity import Quantity
from astropy.units.utils import quantity_asanyarray, sanitize_scale
_float_finfo = finfo(float)
def test_quantity_asanyarray():
array_of_quantities = [Quantity(1), Quantity(2), Quantity(3)]
quantity_array = quantity_asanyarray(array_of_quantities)
assert isinstance(quantity_array, Quantity)
array_of_integers = [1, 2, 3]
np_array = quantity_asanyarray(array_of_integers)
assert isinstance(np_array, np.ndarray)
def test_sanitize_scale():
assert sanitize_scale(complex(2, _float_finfo.eps)) == 2
assert sanitize_scale(complex(_float_finfo.eps, 2)) == 2j
|
d704e129caca9881f6e99d4fa0bd8da07c9208a2145469aabf6de09fac4a6d09 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for deprecated units or those that are "soft" deprecated
because they are required for VOUnit support but are not in common use."""
import pytest
from astropy import units as u
from astropy.units import deprecated, required_by_vounit
def test_emu():
with pytest.raises(AttributeError):
u.emu
assert u.Bi.to(deprecated.emu, 1) == 1
with deprecated.enable():
assert u.Bi.compose()[0] == deprecated.emu
assert u.Bi.compose()[0] == u.Bi
# test that the earth/jupiter mass/rad are also in the deprecated bunch
for body in ('earth', 'jupiter'):
for phystype in ('Mass', 'Rad'):
            # only test a couple of prefixes to save time
for prefix in ('n', 'y'):
namewoprefix = body + phystype
unitname = prefix + namewoprefix
with pytest.raises(AttributeError):
getattr(u, unitname)
assert (getattr(deprecated, unitname).represents.bases[0] ==
getattr(u, namewoprefix))
def test_required_by_vounit():
# The tests below could be replicated with all the various prefixes, but it
# seems unnecessary because they all come as a set. So we only use nano for
# the purposes of this test.
with pytest.raises(AttributeError):
# nano-solar mass/rad/lum shouldn't be in the base unit namespace
u.nsolMass
u.nsolRad
u.nsolLum
# but they should be enabled by default via required_by_vounit, to allow
# the Unit constructor to accept them
assert u.Unit('nsolMass') == required_by_vounit.nsolMass
assert u.Unit('nsolRad') == required_by_vounit.nsolRad
assert u.Unit('nsolLum') == required_by_vounit.nsolLum
# but because they are prefixes, they shouldn't be in find_equivalent_units
assert required_by_vounit.nsolMass not in u.solMass.find_equivalent_units()
assert required_by_vounit.nsolRad not in u.solRad.find_equivalent_units()
assert required_by_vounit.nsolLum not in u.solLum.find_equivalent_units()
|
8ae8413961995b06a056aaa49dee46f5b74a3b971880f87250e4a7a82e2ca592 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the photometric module. Note that this is shorter than
might be expected because a lot of the relevant tests that deal
with magnitudes are in `test_logarithmic.py`.
"""
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import AA, ABflux, Jy, Magnitude, STflux, cm, erg, mgy, nmgy, s, zero_point_flux
def test_maggies():
assert_quantity_allclose(1e-9*mgy, 1*nmgy)
assert_quantity_allclose(Magnitude((1*nmgy).to(mgy)).value, 22.5)
def test_maggies_zpts():
assert_quantity_allclose((1*nmgy).to(ABflux, zero_point_flux(1*ABflux)), 3631e-9*Jy, rtol=1e-3)
ST_base_unit = erg * cm**-2 / s / AA
stmgy = (10*mgy).to(STflux, zero_point_flux(1*ST_base_unit))
assert_quantity_allclose(stmgy, 10*ST_base_unit)
mgyst = (2*ST_base_unit).to(mgy, zero_point_flux(0.5*ST_base_unit))
assert_quantity_allclose(mgyst, 4*mgy)
nmgyst = (5.e-10*ST_base_unit).to(mgy, zero_point_flux(0.5*ST_base_unit))
assert_quantity_allclose(nmgyst, 1*nmgy)
|
e21a329e4e4d53520642193e059dc5d501adc797e8ec0eca45ca6f87a336aa38 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units.format package
"""
import warnings
from contextlib import nullcontext
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.constants import si
from astropy.units import UnitsWarning, core, dex
from astropy.units import format as u_format
from astropy.units.utils import is_effectively_unity
@pytest.mark.parametrize('strings, unit', [
(["m s", "m*s", "m.s"], u.m * u.s),
(["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
(["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m ** 2),
(["m**-3", "m-3", "m^(-3)", "/m3"], u.m ** -3),
(["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m ** 1.5),
(["2.54 cm"], u.Unit(u.cm * 2.54)),
(["10+8m"], u.Unit(u.m * 1e8)),
# This is the VOUnits documentation, but doesn't seem to follow the
    # unit grammar (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
(["sqrt(m)"], u.m ** 0.5),
(["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
(["mag"], u.mag),
(["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
(["dex"], u.dex),
(["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2)),
])
def test_unit_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.Generic.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', ['sin( /pixel /s)', 'mag(mag)',
'dB(dB(mW))', 'dex()'])
def test_unit_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.Generic.parse(string)
@pytest.mark.parametrize('strings, unit', [
(["0.1nm"], u.AA),
(["mW/m2"], u.Unit(u.erg / u.cm ** 2 / u.s)),
(["mW/(m2)"], u.Unit(u.erg / u.cm ** 2 / u.s)),
(["km/s", "km.s-1"], u.km / u.s),
(["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
(["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
(["1.5×10+11m"], u.Unit(1.5e11 * u.m)),
(["m2"], u.m ** 2),
(["10+21m"], u.Unit(u.m * 1e21)),
(["2.54cm"], u.Unit(u.cm * 2.54)),
(["20%"], 0.20 * u.dimensionless_unscaled),
(["10+9"], 1.e9 * u.dimensionless_unscaled),
(["2x10-9"], 2.e-9 * u.dimensionless_unscaled),
(["---"], u.dimensionless_unscaled),
(["ma"], u.ma),
(["mAU"], u.mAU),
(["uarcmin"], u.uarcmin),
(["uarcsec"], u.uarcsec),
(["kbarn"], u.kbarn),
(["Gbit"], u.Gbit),
(["Gibit"], 2 ** 30 * u.bit),
(["kbyte"], u.kbyte),
(["mRy"], 0.001 * u.Ry),
(["mmag"], u.mmag),
(["Mpc"], u.Mpc),
(["Gyr"], u.Gyr),
(["°"], u.degree),
(["°/s"], u.degree / u.s),
(["Å"], u.AA),
(["Å/s"], u.AA / u.s),
(["\\h"], si.h),
(["[cm/s2]"], dex(u.cm / u.s ** 2)),
(["[K]"], dex(u.K)),
(["[-]"], dex(u.dimensionless_unscaled))])
def test_cds_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.CDS.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', [
'0.1 nm',
'solMass(3/2)',
'km / s',
'km s-1',
'pix0.1nm',
'pix/(0.1nm)',
'km*s',
'km**2',
'5x8+3m',
'0.1---',
'---m',
'm---',
'--',
'0.1-',
'-m',
'm-',
'mag(s-1)',
'dB(mW)',
'dex(cm s-2)',
'[--]'])
def test_cds_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.CDS.parse(string)
def test_cds_dimensionless():
assert u.Unit('---', format='cds') == u.dimensionless_unscaled
assert u.dimensionless_unscaled.to_string(format='cds') == "---"
def test_cds_log10_dimensionless():
assert u.Unit('[-]', format='cds') == u.dex(u.dimensionless_unscaled)
assert u.dex(u.dimensionless_unscaled).to_string(format='cds') == "[-]"
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize('strings, unit', [
(["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
u.count / u.s),
(["/pixel /s", "/(pixel * s)"], (u.pixel * u.s) ** -1),
(["count /m**2 /s /eV", "count m**(-2) * s**(-1) * eV**(-1)",
"count /(m**2 * s * eV)"],
u.count * u.m ** -2 * u.s ** -1 * u.eV ** -1),
(["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
u.erg / (u.s * u.GHz * u.pixel)),
(["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
# Though this is given as an example, it seems to violate the rules
# of not raising scales to powers, so I'm just excluding it
# "(10**2 MeV)**2 /yr /m"
u.keV**2 / (u.yr * u.angstrom)),
(["10**(46) erg /s", "10**46 erg /s", "10**(39) J /s", "10**(39) W",
"10**(15) YW", "YJ /fs"],
10**46 * u.erg / u.s),
(["10**(-7) J /cm**2 /MeV", "10**(-9) J m**(-2) eV**(-1)",
"nJ m**(-2) eV**(-1)", "nJ /m**2 /eV"],
10 ** -7 * u.J * u.cm ** -2 * u.MeV ** -1),
(["sqrt(erg /pixel /s /GHz)", "(erg /pixel /s /GHz)**(0.5)",
"(erg /pixel /s /GHz)**(1/2)",
"erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)"],
(u.erg * u.pixel ** -1 * u.s ** -1 * u.GHz ** -1) ** 0.5),
(["(count /s) (/pixel /s)", "(count /s) * (/pixel /s)",
"count /pixel /s**2"],
(u.count / u.s) * (1.0 / (u.pixel * u.s)))])
def test_ogip_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.OGIP.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', [
'log(photon /m**2 /s /Hz)',
'sin( /pixel /s)',
'log(photon /cm**2 /s /Hz) /(sin( /pixel /s))',
'log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)',
'dB(mW)', 'dex(cm/s**2)'])
def test_ogip_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.OGIP.parse(string)
class RoundtripBase:
deprecated_units = set()
def check_roundtrip(self, unit, output_format=None):
if output_format is None:
output_format = self.format_
with warnings.catch_warnings():
warnings.simplefilter('ignore') # Same warning shows up multiple times
s = unit.to_string(output_format)
if s in self.deprecated_units:
with pytest.warns(UnitsWarning, match='deprecated') as w:
a = core.Unit(s, format=self.format_)
assert len(w) == 1
else:
a = core.Unit(s, format=self.format_) # No warning
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
def check_roundtrip_decompose(self, unit):
ud = unit.decompose()
s = ud.to_string(self.format_)
assert ' ' not in s
a = core.Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, ud.scale, rtol=1e-5)
class TestRoundtripGeneric(RoundtripBase):
format_ = 'generic'
@pytest.mark.parametrize('unit', [
unit for unit in u.__dict__.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
self.check_roundtrip(unit, output_format='unicode')
self.check_roundtrip_decompose(unit)
class TestRoundtripVOUnit(RoundtripBase):
format_ = 'vounit'
deprecated_units = u_format.VOUnit._deprecated_units
@pytest.mark.parametrize('unit', [
unit for unit in u_format.VOUnit._units.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit not in (u.mag, u.dB):
self.check_roundtrip_decompose(unit)
class TestRoundtripFITS(RoundtripBase):
format_ = 'fits'
deprecated_units = u_format.Fits._deprecated_units
@pytest.mark.parametrize('unit', [
unit for unit in u_format.Fits._units.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
class TestRoundtripCDS(RoundtripBase):
format_ = 'cds'
@pytest.mark.parametrize('unit', [
unit for unit in u_format.CDS._units.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit == u.mag:
# Skip mag: decomposes into dex, which is unknown to CDS.
return
self.check_roundtrip_decompose(unit)
@pytest.mark.parametrize('unit', [u.dex(unit) for unit in
(u.cm/u.s**2, u.K, u.Lsun)])
def test_roundtrip_dex(self, unit):
string = unit.to_string(format='cds')
recovered = u.Unit(string, format='cds')
assert recovered == unit
class TestRoundtripOGIP(RoundtripBase):
format_ = 'ogip'
deprecated_units = u_format.OGIP._deprecated_units | {'d'}
@pytest.mark.parametrize('unit', [
unit for unit in u_format.OGIP._units.values()
if (isinstance(unit, core.UnitBase) and
not isinstance(unit, core.PrefixUnit))])
def test_roundtrip(self, unit):
if str(unit) in ('d', '0.001 Crab'):
# Special-case day, which gets auto-converted to hours, and mCrab,
# which the default check does not recognize as a deprecated unit.
with pytest.warns(UnitsWarning):
s = unit.to_string(self.format_)
a = core.Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
else:
self.check_roundtrip(unit)
if str(unit) in ('mag', 'byte', 'Crab'):
# Skip mag and byte, which decompose into dex and bit, resp.,
# both of which are unknown to OGIP, as well as Crab, which does
# not decompose, and thus gives a deprecated unit warning.
return
power_of_ten = np.log10(unit.decompose().scale)
if abs(power_of_ten - round(power_of_ten)) > 1e-3:
ctx = pytest.warns(UnitsWarning, match='power of 10')
elif str(unit) == '0.001 Crab':
ctx = pytest.warns(UnitsWarning, match='deprecated')
else:
ctx = nullcontext()
with ctx:
self.check_roundtrip_decompose(unit)
def test_fits_units_available():
u_format.Fits._units
def test_vo_units_available():
u_format.VOUnit._units
def test_cds_units_available():
u_format.CDS._units
def test_cds_non_ascii_unit():
"""Regression test for #5350. This failed with a decoding error as
μas could not be represented in ascii."""
from astropy.units import cds
with cds.enable():
u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert fluxunit.to_string('latex') == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
def test_new_style_latex():
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert f"{fluxunit:latex}" == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
def test_latex_scale():
fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz))
latex = r'$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$'
assert fluxunit.to_string('latex') == latex
def test_latex_inline_scale():
fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz))
latex_inline = (r'$\mathrm{1 \times 10^{-24}\,erg'
r'\,Hz^{-1}\,s^{-1}\,cm^{-2}}$')
assert fluxunit.to_string('latex_inline') == latex_inline
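# Illustrative sketch (added; not part of the original suite): for a unit
# without a scale factor, 'latex' renders quotients with \frac{}{} while
# 'latex_inline' uses negative exponents; the expected strings match those
# asserted in test_format_styles below.
def _example_latex_vs_latex_inline():
    fluxunit = u.erg / (u.cm ** 2 * u.s)
    assert fluxunit.to_string('latex') == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
    assert fluxunit.to_string('latex_inline') == r'$\mathrm{erg\,s^{-1}\,cm^{-2}}$'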
@pytest.mark.parametrize('format_spec, string', [
('generic', 'erg / (cm2 s)'),
('s', 'erg / (cm2 s)'),
('console', ' erg \n ------\n s cm^2'),
('latex', '$\\mathrm{\\frac{erg}{s\\,cm^{2}}}$'),
('latex_inline', '$\\mathrm{erg\\,s^{-1}\\,cm^{-2}}$'),
('>20s', ' erg / (cm2 s)')])
def test_format_styles(format_spec, string):
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert format(fluxunit, format_spec) == string
def test_flatten_to_known():
myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
assert myunit.to_string('fits') == 'erg Hz-1'
myunit2 = myunit * u.bit ** 3
assert myunit2.to_string('fits') == 'bit3 erg Hz-1'
def test_flatten_impossible():
myunit = u.def_unit("FOOBAR_Two")
with u.add_enabled_units(myunit), pytest.raises(ValueError):
myunit.to_string('fits')
def test_console_out():
"""
Issue #436.
"""
u.Jy.decompose().to_string('console')
def test_flexible_float():
assert u.min._represents.to_string('latex') == r'$\mathrm{60\,s}$'
def test_fits_to_string_function_error():
"""Test function raises TypeError on bad input.
This instead of returning None, see gh-11825.
"""
with pytest.raises(TypeError, match='unit argument must be'):
u_format.Fits.to_string(None)
def test_fraction_repr():
area = u.cm ** 2.0
assert '.' not in area.to_string('latex')
fractional = u.cm ** 2.5
assert '5/2' in fractional.to_string('latex')
assert fractional.to_string('unicode') == 'cm⁵⸍²'
def test_scale_effectively_unity():
"""Scale just off unity at machine precision level is OK.
Ensures #748 does not recur
"""
a = (3. * u.N).cgs
assert is_effectively_unity(a.unit.scale)
    assert len(repr(a).split()) == 3
def test_percent():
"""Test that the % unit is properly recognized. Since % is a special
symbol, this goes slightly beyond the round-tripping tested above."""
assert u.Unit('%') == u.percent == u.Unit(0.01)
assert u.Unit('%', format='cds') == u.Unit(0.01)
assert u.Unit(0.01).to_string('cds') == '%'
with pytest.raises(ValueError):
u.Unit('%', format='fits')
with pytest.raises(ValueError):
u.Unit('%', format='vounit')
def test_scaled_dimensionless():
"""Test that scaled dimensionless units are properly recognized in generic
and CDS, but not in fits and vounit."""
assert u.Unit('0.1') == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
assert u.Unit('1.e-4') == u.Unit(1.e-4)
assert u.Unit('10-4', format='cds') == u.Unit(1.e-4)
assert u.Unit('10+8').to_string('cds') == '10+8'
with pytest.raises(ValueError):
u.Unit(0.15).to_string('fits')
assert u.Unit(0.1).to_string('fits') == '10**-1'
with pytest.raises(ValueError):
u.Unit(0.1).to_string('vounit')
def test_deprecated_did_you_mean_units():
with pytest.raises(ValueError) as exc_info:
u.Unit('ANGSTROM', format='fits')
assert 'Did you mean Angstrom or angstrom?' in str(exc_info.value)
with pytest.raises(ValueError) as exc_info:
u.Unit('crab', format='ogip')
assert 'Crab (deprecated)' in str(exc_info.value)
assert 'mCrab (deprecated)' in str(exc_info.value)
with pytest.warns(UnitsWarning, match=r'.* Did you mean 0\.1nm, Angstrom '
r'\(deprecated\) or angstrom \(deprecated\)\?') as w:
u.Unit('ANGSTROM', format='vounit')
assert len(w) == 1
assert str(w[0].message).count('0.1nm') == 1
with pytest.warns(UnitsWarning, match=r'.* 0\.1nm\.') as w:
u.Unit('angstrom', format='vounit')
assert len(w) == 1
@pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)'])
def test_fits_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.Fits().parse(string)
@pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)'])
def test_vounit_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError), warnings.catch_warnings():
warnings.simplefilter('ignore') # ct, dex also raise warnings - irrelevant here.
u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
    assert u.Unit('KiB', format='vounit') == u.Unit('1024 B')
    assert u.Unit('Kibyte', format='vounit') == u.Unit('1024 B')
    assert u.Unit('Kibit', format='vounit') == u.Unit('1024 bit')
with pytest.warns(UnitsWarning) as w:
u.Unit('kibibyte', format='vounit')
assert len(w) == 1
def test_vounit_unknown():
assert u.Unit('unknown', format='vounit') is None
assert u.Unit('UNKNOWN', format='vounit') is None
assert u.Unit('', format='vounit') is u.dimensionless_unscaled
def test_vounit_details():
with pytest.warns(UnitsWarning, match='deprecated') as w:
assert u.Unit('Pa', format='vounit') is u.Pascal
assert len(w) == 1
# The da- prefix is not allowed, and the d- prefix is discouraged
assert u.dam.to_string('vounit') == '10m'
assert u.Unit('dam dag').to_string('vounit') == '100g.m'
# Parse round-trip
with pytest.warns(UnitsWarning, match='deprecated'):
flam = u.erg / u.cm / u.cm / u.s / u.AA
x = u.format.VOUnit.to_string(flam)
assert x == 'Angstrom**-1.cm**-2.erg.s**-1'
new_flam = u.format.VOUnit.parse(x)
assert new_flam == flam
@pytest.mark.parametrize('unit, vounit, number, scale, voscale',
[('nm', 'nm', 0.1, '10^-1', '0.1'),
('fm', 'fm', 100.0, '10+2', '100'),
('m^2', 'm**2', 100.0, '100.0', '100'),
('cm', 'cm', 2.54, '2.54', '2.54'),
('kg', 'kg', 1.898124597e27, '1.898124597E27', '1.8981246e+27'),
('m/s', 'm.s**-1', 299792458.0, '299792458', '2.9979246e+08'),
('cm2', 'cm**2', 1.e-20, '10^(-20)', '1e-20')])
def test_vounit_scale_factor(unit, vounit, number, scale, voscale):
x = u.Unit(f'{scale} {unit}')
assert x == number * u.Unit(unit)
assert x.to_string(format='vounit') == voscale + vounit
def test_vounit_custom():
x = u.Unit("'foo' m", format='vounit')
x_vounit = x.to_string('vounit')
assert x_vounit == "'foo'.m"
x_string = x.to_string()
assert x_string == "foo m"
x = u.Unit("m'foo' m", format='vounit')
assert x.bases[1]._represents.scale == 0.001
x_vounit = x.to_string('vounit')
assert x_vounit == "m.m'foo'"
x_string = x.to_string()
assert x_string == 'm mfoo'
def test_vounit_implicit_custom():
# Yikes, this becomes "femto-urlong"... But at least there's a warning.
with pytest.warns(UnitsWarning) as w:
x = u.Unit("furlong/week", format="vounit")
assert x.bases[0]._represents.scale == 1e-15
assert x.bases[0]._represents.bases[0].name == 'urlong'
assert len(w) == 2
assert 'furlong' in str(w[0].message)
assert 'week' in str(w[1].message)
@pytest.mark.parametrize('scale, number, string',
[('10+2', 100, '10**2'),
('10(+2)', 100, '10**2'),
('10**+2', 100, '10**2'),
('10**(+2)', 100, '10**2'),
('10^+2', 100, '10**2'),
('10^(+2)', 100, '10**2'),
('10**2', 100, '10**2'),
('10**(2)', 100, '10**2'),
('10^2', 100, '10**2'),
('10^(2)', 100, '10**2'),
('10-20', 10**(-20), '10**-20'),
('10(-20)', 10**(-20), '10**-20'),
('10**-20', 10**(-20), '10**-20'),
('10**(-20)', 10**(-20), '10**-20'),
('10^-20', 10**(-20), '10**-20'),
('10^(-20)', 10**(-20), '10**-20'),
])
def test_fits_scale_factor(scale, number, string):
x = u.Unit(scale + ' erg/(s cm**2 Angstrom)', format='fits')
assert x == number * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == string + ' Angstrom-1 cm-2 erg s-1'
x = u.Unit(scale + '*erg/(s cm**2 Angstrom)', format='fits')
assert x == number * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == string + ' Angstrom-1 cm-2 erg s-1'
def test_fits_scale_factor_errors():
with pytest.raises(ValueError):
x = u.Unit('1000 erg/(s cm**2 Angstrom)', format='fits')
with pytest.raises(ValueError):
x = u.Unit('12 erg/(s cm**2 Angstrom)', format='fits')
x = u.Unit(1.2 * u.erg)
with pytest.raises(ValueError):
x.to_string(format='fits')
x = u.Unit(100.0 * u.erg)
assert x.to_string(format='fits') == '10**2 erg'
def test_double_superscript():
"""Regression test for #5870, #8699, #9218; avoid double superscripts."""
assert (u.deg).to_string("latex") == r'$\mathrm{{}^{\circ}}$'
assert (u.deg**2).to_string("latex") == r'$\mathrm{deg^{2}}$'
assert (u.arcmin).to_string("latex") == r'$\mathrm{{}^{\prime}}$'
assert (u.arcmin**2).to_string("latex") == r'$\mathrm{arcmin^{2}}$'
assert (u.arcsec).to_string("latex") == r'$\mathrm{{}^{\prime\prime}}$'
assert (u.arcsec**2).to_string("latex") == r'$\mathrm{arcsec^{2}}$'
assert (u.hourangle).to_string("latex") == r'$\mathrm{{}^{h}}$'
assert (u.hourangle**2).to_string("latex") == r'$\mathrm{hourangle^{2}}$'
assert (u.electron).to_string("latex") == r'$\mathrm{e^{-}}$'
assert (u.electron**2).to_string("latex") == r'$\mathrm{electron^{2}}$'
@pytest.mark.parametrize('power,expected', (
(1., 'm'), (2., 'm2'), (-10, '1 / m10'), (1.5, 'm(3/2)'), (2/3, 'm(2/3)'),
(7/11, 'm(7/11)'), (-1/64, '1 / m(1/64)'), (1/100, 'm(1/100)'),
(2/101, 'm(0.019801980198019802)'), (Fraction(2, 101), 'm(2/101)')))
def test_powers(power, expected):
"""Regression test for #9279 - powers should not be oversimplified."""
unit = u.m ** power
s = unit.to_string()
assert s == expected
assert unit == s
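# Illustrative sketch (added; not part of the original suite): a Fraction
# power survives a to_string/parse roundtrip, as the parametrized test
# above asserts via ``unit == s``.
def _example_fraction_power_roundtrip():
    unit = u.m ** Fraction(7, 11)
    assert unit.to_string() == 'm(7/11)'
    assert u.Unit('m(7/11)') == unit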
@pytest.mark.parametrize('string,unit', [
('\N{MICRO SIGN}g', u.microgram),
('\N{GREEK SMALL LETTER MU}g', u.microgram),
('g\N{MINUS SIGN}1', u.g**(-1)),
('m\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}', 1 / u.m),
('m s\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}', u.m / u.s),
('m\N{SUPERSCRIPT TWO}', u.m**2),
('m\N{SUPERSCRIPT PLUS SIGN}\N{SUPERSCRIPT TWO}', u.m**2),
('m\N{SUPERSCRIPT THREE}', u.m**3),
('m\N{SUPERSCRIPT ONE}\N{SUPERSCRIPT ZERO}', u.m**10),
('\N{GREEK CAPITAL LETTER OMEGA}', u.ohm),
('\N{OHM SIGN}', u.ohm), # deprecated but for compatibility
('\N{MICRO SIGN}\N{GREEK CAPITAL LETTER OMEGA}', u.microOhm),
('\N{ANGSTROM SIGN}', u.Angstrom),
('\N{ANGSTROM SIGN} \N{OHM SIGN}', u.Angstrom * u.Ohm),
('\N{LATIN CAPITAL LETTER A WITH RING ABOVE}', u.Angstrom),
('\N{LATIN CAPITAL LETTER A}\N{COMBINING RING ABOVE}', u.Angstrom),
('m\N{ANGSTROM SIGN}', u.milliAngstrom),
('°C', u.deg_C),
('°', u.deg),
('M⊙', u.Msun), # \N{CIRCLED DOT OPERATOR}
('L☉', u.Lsun), # \N{SUN}
('M⊕', u.Mearth), # normal earth symbol = \N{CIRCLED PLUS}
('M♁', u.Mearth), # be generous with \N{EARTH}
('R♃', u.Rjup), # \N{JUPITER}
('′', u.arcmin), # \N{PRIME}
('R∞', u.Ry),
('Mₚ', u.M_p),
])
def test_unicode(string, unit):
assert u_format.Generic.parse(string) == unit
assert u.Unit(string) == unit
@pytest.mark.parametrize('string', [
'g\N{MICRO SIGN}',
'g\N{MINUS SIGN}',
'm\N{SUPERSCRIPT MINUS}1',
'm+\N{SUPERSCRIPT ONE}',
'm\N{MINUS SIGN}\N{SUPERSCRIPT ONE}',
'k\N{ANGSTROM SIGN}',
])
def test_unicode_failures(string):
with pytest.raises(ValueError):
u.Unit(string)
@pytest.mark.parametrize('format_', ('unicode', 'latex', 'latex_inline'))
def test_parse_error_message_for_output_only_format(format_):
with pytest.raises(NotImplementedError, match='not parse'):
u.Unit('m', format=format_)
def test_unknown_parser():
with pytest.raises(ValueError, match=r"Unknown.*unicode'\] for output only"):
u.Unit('m', format='foo')
|
23a8316043e199e0a5ba502598b70e1c8f264b8f0a16710faf47946377b43c7e | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for the units package."""
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.units import utils
def test_initialisation():
assert u.Unit(u.m) is u.m
ten_meter = u.Unit(10.*u.m)
assert ten_meter == u.CompositeUnit(10., [u.m], [1])
assert u.Unit(ten_meter) is ten_meter
assert u.Unit(10.*ten_meter) == u.CompositeUnit(100., [u.m], [1])
foo = u.Unit('foo', (10. * ten_meter)**2, namespace=locals())
assert foo == u.CompositeUnit(10000., [u.m], [2])
assert u.Unit('m') == u.m
assert u.Unit('') == u.dimensionless_unscaled
assert u.one == u.dimensionless_unscaled
assert u.Unit('10 m') == ten_meter
assert u.Unit(10.) == u.CompositeUnit(10., [], [])
assert u.Unit() == u.dimensionless_unscaled
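# Illustrative sketch (added; not part of the original suite): a
# CompositeUnit, as constructed throughout test_initialisation above,
# exposes its overall scale and parallel bases/powers lists.
def _example_composite_unit_attributes():
    ten_meter = u.CompositeUnit(10., [u.m], [1])
    assert ten_meter.scale == 10.
    assert ten_meter.bases == [u.m]
    assert ten_meter.powers == [1]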
def test_invalid_power():
x = u.m ** Fraction(1, 3)
assert isinstance(x.powers[0], Fraction)
x = u.m ** Fraction(1, 2)
assert isinstance(x.powers[0], float)
# Test the automatic conversion to a fraction
x = u.m ** (1. / 3.)
assert isinstance(x.powers[0], Fraction)
def test_invalid_compare():
assert not (u.m == u.s)
def test_convert():
assert u.h._get_converter(u.s)(1) == 3600
def test_convert_fail():
with pytest.raises(u.UnitsError):
u.cm.to(u.s, 1)
with pytest.raises(u.UnitsError):
(u.cm / u.s).to(u.m, 1)
def test_composite():
assert (u.cm / u.s * u.h)._get_converter(u.m)(1) == 36
assert u.cm * u.cm == u.cm ** 2
assert u.cm * u.cm * u.cm == u.cm ** 3
assert u.Hz.to(1000 * u.Hz, 1) == 0.001
def test_str():
assert str(u.cm) == "cm"
def test_repr():
assert repr(u.cm) == 'Unit("cm")'
def test_represents():
assert u.m.represents is u.m
assert u.km.represents.scale == 1000.
assert u.km.represents.bases == [u.m]
assert u.Ry.scale == 1.0 and u.Ry.bases == [u.Ry]
assert_allclose(u.Ry.represents.scale, 13.605692518464949)
assert u.Ry.represents.bases == [u.eV]
bla = u.def_unit('bla', namespace=locals())
assert bla.represents is bla
blabla = u.def_unit('blabla', 10 * u.hr, namespace=locals())
assert blabla.represents.scale == 10.
assert blabla.represents.bases == [u.hr]
assert blabla.decompose().scale == 10 * 3600
assert blabla.decompose().bases == [u.s]
def test_units_conversion():
assert_allclose(u.kpc.to(u.Mpc), 0.001)
assert_allclose(u.Mpc.to(u.kpc), 1000)
assert_allclose(u.yr.to(u.Myr), 1.e-6)
assert_allclose(u.AU.to(u.pc), 4.84813681e-6)
assert_allclose(u.cycle.to(u.rad), 6.283185307179586)
assert_allclose(u.spat.to(u.sr), 12.56637061435917)
def test_units_manipulation():
# Just do some manipulation and check it's happy
(u.kpc * u.yr) ** Fraction(1, 3) / u.Myr
(u.AA * u.erg) ** 9
def test_decompose():
assert u.Ry == u.Ry.decompose()
def test_dimensionless_to_si():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the SI system
"""
testunit = ((1.0 * u.kpc) / (1.0 * u.Mpc))
assert testunit.unit.physical_type == 'dimensionless'
assert_allclose(testunit.si, 0.001)
def test_dimensionless_to_cgs():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the CGS system
"""
testunit = ((1.0 * u.m) / (1.0 * u.km))
assert testunit.unit.physical_type == 'dimensionless'
assert_allclose(testunit.cgs, 0.001)
def test_unknown_unit():
with pytest.warns(u.UnitsWarning, match='FOO'):
u.Unit("FOO", parse_strict='warn')
def test_multiple_solidus():
with pytest.warns(u.UnitsWarning, match="'m/s/kg' contains multiple "
"slashes, which is discouraged"):
assert u.Unit("m/s/kg").to_string() == 'm / (kg s)'
with pytest.raises(ValueError):
u.Unit("m/s/kg", format="vounit")
# Regression test for #9000: solidi in exponents do not count towards this.
x = u.Unit("kg(3/10) * m(5/2) / s", format="vounit")
assert x.to_string() == 'kg(3/10) m(5/2) / s'
def test_unknown_unit3():
unit = u.Unit("FOO", parse_strict='silent')
assert isinstance(unit, u.UnrecognizedUnit)
assert unit.name == "FOO"
unit2 = u.Unit("FOO", parse_strict='silent')
assert unit == unit2
assert unit.is_equivalent(unit2)
unit3 = u.Unit("BAR", parse_strict='silent')
assert unit != unit3
assert not unit.is_equivalent(unit3)
# Also test basic (in)equalities.
assert unit == "FOO"
assert unit != u.m
# next two from gh-7603.
assert unit != None # noqa
assert unit not in (None, u.m)
with pytest.raises(ValueError):
unit._get_converter(unit3)
_ = unit.to_string('latex')
_ = unit2.to_string('cgs')
with pytest.raises(ValueError):
u.Unit("BAR", parse_strict='strict')
with pytest.raises(TypeError):
u.Unit(None)
def test_invalid_scale():
with pytest.raises(TypeError):
['a', 'b', 'c'] * u.m
def test_cds_power():
unit = u.Unit("10+22/cm2", format="cds", parse_strict='silent')
assert unit.scale == 1e22
def test_register():
foo = u.def_unit("foo", u.m ** 3, namespace=locals())
assert 'foo' in locals()
with u.add_enabled_units(foo):
assert 'foo' in u.get_current_unit_registry().registry
assert 'foo' not in u.get_current_unit_registry().registry
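# Illustrative sketch (added; not part of the original suite): once enabled,
# a user-defined unit can be parsed by name; outside the context it is
# unknown again, and parsing raises by default. The unit name 'barex' is a
# hypothetical example.
def _example_enabled_unit_parsing():
    ns = {}
    bar = u.def_unit('barex', 2 * u.m, namespace=ns)
    with u.add_enabled_units(bar):
        assert u.Unit('barex') == bar
    with pytest.raises(ValueError):
        u.Unit('barex')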
def test_in_units():
speed_unit = u.cm / u.s
_ = speed_unit.in_units(u.pc / u.hour, 1)
def test_null_unit():
assert (u.m / u.m) == u.Unit(1)
def test_unrecognized_equivalency():
assert u.m.is_equivalent('foo') is False
assert u.m.is_equivalent('pc') is True
def test_convertible_exception():
with pytest.raises(u.UnitsError, match=r'length.+ are not convertible'):
u.AA.to(u.h * u.s ** 2)
def test_convertible_exception2():
with pytest.raises(u.UnitsError, match=r'length. and .+time.+ are not convertible'):
u.m.to(u.s)
def test_invalid_type():
class A:
pass
with pytest.raises(TypeError):
u.Unit(A())
def test_steradian():
"""
Issue #599
"""
assert u.sr.is_equivalent(u.rad * u.rad)
results = u.sr.compose(units=u.cgs.bases)
assert results[0].bases[0] is u.rad
results = u.sr.compose(units=u.cgs.__dict__)
assert results[0].bases[0] is u.sr
def test_decompose_bases():
"""
From issue #576
"""
from astropy.constants import e
from astropy.units import cgs
d = e.esu.unit.decompose(bases=cgs.bases)
assert d._bases == [u.cm, u.g, u.s]
assert d._powers == [Fraction(3, 2), 0.5, -1]
assert d._scale == 1.0
def test_complex_compose():
complex = u.cd * u.sr * u.Wb
composed = complex.compose()
assert set(composed[0]._bases) == set([u.lm, u.Wb])
def test_equiv_compose():
composed = u.m.compose(equivalencies=u.spectral())
assert any([u.Hz] == x.bases for x in composed)
def test_empty_compose():
with pytest.raises(u.UnitsError):
u.m.compose(units=[])
def _unit_as_str(unit):
# This function serves two purposes - it is used to sort the units to
    # test alphabetically, and it is also used to allow pytest to show the unit
# in the [] when running the parametrized tests.
return str(unit)
# We use a set to make sure we don't have any duplicates.
COMPOSE_ROUNDTRIP = set()
for val in u.__dict__.values():
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit)):
COMPOSE_ROUNDTRIP.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_ROUNDTRIP, key=_unit_as_str), ids=_unit_as_str)
def test_compose_roundtrip(unit):
composed_list = unit.decompose().compose()
found = False
for composed in composed_list:
if len(composed.bases):
if composed.bases[0] is unit:
found = True
break
elif len(unit.bases) == 0:
found = True
break
assert found
# We use a set to make sure we don't have any duplicates.
COMPOSE_CGS_TO_SI = set()
for val in u.cgs.__dict__.values():
# Can't decompose Celsius
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit) and
val != u.cgs.deg_C):
COMPOSE_CGS_TO_SI.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_CGS_TO_SI, key=_unit_as_str),
ids=_unit_as_str)
def test_compose_cgs_to_si(unit):
si = unit.to_system(u.si)
    assert all(x.is_equivalent(unit) for x in si)
assert si[0] == unit.si
# We use a set to make sure we don't have any duplicates.
COMPOSE_SI_TO_CGS = set()
for val in u.si.__dict__.values():
# Can't decompose Celsius
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit) and
val != u.si.deg_C):
COMPOSE_SI_TO_CGS.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_SI_TO_CGS, key=_unit_as_str), ids=_unit_as_str)
def test_compose_si_to_cgs(unit):
# Can't convert things with Ampere to CGS without more context
try:
cgs = unit.to_system(u.cgs)
except u.UnitsError:
if u.A in unit.decompose().bases:
pass
else:
raise
else:
        assert all(x.is_equivalent(unit) for x in cgs)
assert cgs[0] == unit.cgs
def test_to_si():
"""Check units that are not official derived units.
    They should not appear on their own or as part of a composite unit.
"""
# TODO: extend to all units not listed in Tables 1--6 of
# https://physics.nist.gov/cuu/Units/units.html
# See gh-10585.
# This was always the case
assert u.bar.si is not u.bar
# But this used to fail.
assert u.bar not in (u.kg/(u.s**2*u.sr*u.nm)).si._bases
def test_to_cgs():
assert u.Pa.to_system(u.cgs)[1]._bases[0] is u.Ba
assert u.Pa.to_system(u.cgs)[1]._scale == 10.0
def test_decompose_to_cgs():
from astropy.units import cgs
assert u.m.decompose(bases=cgs.bases)._bases[0] is cgs.cm
def test_compose_issue_579():
unit = u.kg * u.s ** 2 / u.m
result = unit.compose(units=[u.N, u.s, u.m])
assert len(result) == 1
assert result[0]._bases == [u.s, u.N, u.m]
assert result[0]._powers == [4, 1, -2]
def test_compose_prefix_unit():
x = u.m.compose(units=(u.m,))
assert x[0].bases[0] is u.m
assert x[0].scale == 1.0
x = u.m.compose(units=[u.km], include_prefix_units=True)
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = u.m.compose(units=[u.km])
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = (u.km/u.s).compose(units=(u.pc, u.Myr))
assert x[0].bases == [u.pc, u.Myr]
assert_allclose(x[0].scale, 1.0227121650537077)
with pytest.raises(u.UnitsError):
(u.km/u.s).compose(units=(u.pc, u.Myr), include_prefix_units=False)
def test_self_compose():
unit = u.kg * u.s
assert len(unit.compose(units=[u.g, u.s])) == 1
def test_compose_failed():
unit = u.kg
with pytest.raises(u.UnitsError):
unit.compose(units=[u.N])
def test_compose_fractional_powers():
# Warning: with a complicated unit, this test becomes very slow;
# e.g., x = (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2)
# takes 3 s
x = u.m ** 0.5 / u.yr ** 1.5
factored = x.compose()
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.cgs)
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.si)
for unit in factored:
assert x.decompose() == unit.decompose()
def test_compose_best_unit_first():
results = u.l.compose()
assert len(results[0].bases) == 1
assert results[0].bases[0] is u.l
results = (u.s ** -1).compose()
assert results[0].bases[0] in (u.Hz, u.Bq)
results = (u.Ry.decompose()).compose()
assert results[0].bases[0] is u.Ry
def test_compose_no_duplicates():
new = u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2
composed = new.compose(units=u.cgs.bases)
assert len(composed) == 1
def test_long_int():
"""
Issue #672
"""
sigma = 10 ** 21 * u.M_p / u.cm ** 2
sigma.to(u.M_sun / u.pc ** 2)
def test_endian_independence():
"""
Regression test for #744
A logic issue in the units code meant that big endian arrays could not be
converted because the dtype is '>f4', not 'float32', and the code was
looking for the strings 'float' or 'int'.
"""
for endian in ['<', '>']:
for ntype in ['i', 'f']:
for byte in ['4', '8']:
x = np.array([1, 2, 3], dtype=(endian + ntype + byte))
u.m.to(u.cm, x)
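# Illustrative sketch (added; not part of the original suite): '>f8' and
# '<f8' denote big- and little-endian 64-bit floats; as the loop above
# requires, conversion must give the same numbers for both byte orders.
def _example_big_endian_values():
    big = np.array([1., 2.], dtype='>f8')
    little = np.array([1., 2.], dtype='<f8')
    assert np.all(u.m.to(u.cm, big) == u.m.to(u.cm, little))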
def test_radian_base():
"""
Issue #863
"""
assert (1 * u.degree).si.unit == u.rad
def test_no_as():
# We don't define 'as', since it is a keyword, but we
# do want to define the long form (`attosecond`).
assert not hasattr(u, 'as')
assert hasattr(u, 'attosecond')
def test_no_duplicates_in_names():
# Regression test for #5036
assert u.ct.names == ['ct', 'count']
assert u.ct.short_names == ['ct', 'count']
assert u.ct.long_names == ['count']
assert set(u.ph.names) == set(u.ph.short_names) | set(u.ph.long_names)
def test_pickling():
p = pickle.dumps(u.m)
other = pickle.loads(p)
assert other is u.m
new_unit = u.IrreducibleUnit(['foo'], format={'baz': 'bar'})
# This is local, so the unit should not be registered.
assert 'foo' not in u.get_current_unit_registry().registry
# Test pickling of this unregistered unit.
p = pickle.dumps(new_unit)
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ['foo']
assert new_unit_copy.get_format_name('baz') == 'bar'
# It should still not be registered.
assert 'foo' not in u.get_current_unit_registry().registry
# Now try the same with a registered unit.
with u.add_enabled_units([new_unit]):
p = pickle.dumps(new_unit)
assert 'foo' in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is new_unit
# Check that a registered unit can be loaded and that it gets re-enabled.
with u.add_enabled_units([]):
assert 'foo' not in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ['foo']
assert new_unit_copy.get_format_name('baz') == 'bar'
assert 'foo' in u.get_current_unit_registry().registry
# And just to be sure, that it gets removed outside of the context.
assert 'foo' not in u.get_current_unit_registry().registry
def test_pickle_between_sessions():
"""We cannot really test between sessions easily, so fake it.
This test can be changed if the pickle protocol or the code
changes enough that it no longer works.
"""
hash_m = hash(u.m)
unit = pickle.loads(
b'\x80\x04\x95\xd6\x00\x00\x00\x00\x00\x00\x00\x8c\x12'
b'astropy.units.core\x94\x8c\x1a_recreate_irreducible_unit'
b'\x94\x93\x94h\x00\x8c\x0fIrreducibleUnit\x94\x93\x94]\x94'
b'(\x8c\x01m\x94\x8c\x05meter\x94e\x88\x87\x94R\x94}\x94(\x8c\x06'
b'_names\x94]\x94(h\x06h\x07e\x8c\x0c_short_names'
b'\x94]\x94h\x06a\x8c\x0b_long_names\x94]\x94h\x07a\x8c\x07'
b'_format\x94}\x94\x8c\x07__doc__\x94\x8c '
b'meter: base unit of length in SI\x94ub.')
assert unit is u.m
assert hash(u.m) == hash_m
@pytest.mark.parametrize('unit', [
u.IrreducibleUnit(['foo'], format={'baz': 'bar'}),
u.Unit('m_per_s', u.m/u.s)])
def test_pickle_does_not_keep_memoized_hash(unit):
"""
Tests private attribute since the problem with _hash being pickled
and restored only appeared if the unpickling was done in another
session, for which the hash no longer was valid, and it is difficult
to mimic separate sessions in a simple test. See gh-11872.
"""
unit_hash = hash(unit)
assert unit._hash is not None
unit_copy = pickle.loads(pickle.dumps(unit))
# unit is not registered so we get a copy.
assert unit_copy is not unit
assert unit_copy._hash is None
assert hash(unit_copy) == unit_hash
with u.add_enabled_units([unit]):
# unit is registered, so we get a reference.
unit_ref = pickle.loads(pickle.dumps(unit))
if isinstance(unit, u.IrreducibleUnit):
assert unit_ref is unit
else:
assert unit_ref is not unit
# pickle.load used to override the hash, although in this case
# it would be the same anyway, so not clear this tests much.
assert hash(unit) == unit_hash
def test_pickle_unrecognized_unit():
"""
Issue #2047
"""
a = u.Unit('asdf', parse_strict='silent')
pickle.loads(pickle.dumps(a))
def test_duplicate_define():
with pytest.raises(ValueError):
u.def_unit('m', namespace=u.__dict__)
def test_all_units():
from astropy.units.core import get_current_unit_registry
registry = get_current_unit_registry()
assert len(registry.all_units) > len(registry.non_prefix_units)
def test_repr_latex():
assert u.m._repr_latex_() == u.m.to_string('latex')
def test_operations_with_strings():
assert u.m / '5s' == (u.m / (5.0 * u.s))
assert u.m * '5s' == (5.0 * u.m * u.s)
def test_comparison():
assert u.m > u.cm
assert u.m >= u.cm
assert u.cm < u.m
assert u.cm <= u.m
with pytest.raises(u.UnitsError):
u.m > u.kg
def test_compose_into_arbitrary_units():
# Issue #1438
from astropy.constants import G
G.decompose([u.kg, u.km, u.Unit("15 s")])
def test_unit_multiplication_with_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = 'kg'
assert us * u1 == u.Unit(us) * u1
assert u1 * us == u1 * u.Unit(us)
def test_unit_division_by_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = 'kg'
assert us / u1 == u.Unit(us) / u1
assert u1 / us == u1 / u.Unit(us)
def test_sorted_bases():
"""See #1616."""
assert (u.m * u.Jy).bases == (u.Jy * u.m).bases
def test_megabit():
"""See #1543"""
assert u.Mbit is u.Mb
assert u.megabit is u.Mb
assert u.Mbyte is u.MB
assert u.megabyte is u.MB
def test_composite_unit_get_format_name():
"""See #1576"""
unit1 = u.Unit('nrad/s')
unit2 = u.Unit('Hz(1/2)')
assert (str(u.CompositeUnit(1, [unit1, unit2], [1, -1])) ==
'nrad / (Hz(1/2) s)')
def test_unicode_policy():
from astropy.tests.helper import assert_follows_unicode_guidelines
assert_follows_unicode_guidelines(
u.degree, roundtrip=u.__dict__)
def test_suggestions():
for search, matches in [
('microns', 'micron'),
('s/microns', 'micron'),
('M', 'm'),
('metre', 'meter'),
('angstroms', 'Angstrom or angstrom'),
('milimeter', 'millimeter'),
('ångström', 'Angstrom, angstrom, mAngstrom or mangstrom'),
('kev', 'EV, eV, kV or keV')]:
with pytest.raises(ValueError, match=f'Did you mean {matches}'):
u.Unit(search)
def test_fits_hst_unit():
"""See #1911."""
with pytest.warns(u.UnitsWarning, match='multiple slashes') as w:
x = u.Unit("erg /s /cm**2 /angstrom")
assert x == u.erg * u.s ** -1 * u.cm ** -2 * u.angstrom ** -1
assert len(w) == 1
def test_barn_prefixes():
"""Regression test for https://github.com/astropy/astropy/issues/3753"""
assert u.fbarn is u.femtobarn
assert u.pbarn is u.picobarn
def test_fractional_powers():
"""See #2069"""
m = 1e9 * u.Msun
tH = 1. / (70. * u.km / u.s / u.Mpc)
vc = 200 * u.km/u.s
x = (c.G ** 2 * m ** 2 * tH.cgs) ** Fraction(1, 3) / vc
v1 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH) ** Fraction(1, 3) / vc
v2 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH.cgs) ** (1.0 / 3.0) / vc
v3 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH) ** (1.0 / 3.0) / vc
v4 = x.to('pc')
assert_allclose(v1, v2)
assert_allclose(v2, v3)
assert_allclose(v3, v4)
x = u.m ** (1.0 / 101.0)
assert isinstance(x.powers[0], float)
x = u.m ** (3.0 / 7.0)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 3
assert x.powers[0].denominator == 7
x = u.cm ** Fraction(1, 2) * u.cm ** Fraction(2, 3)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(7, 6)
# Regression test for #9258.
x = (u.TeV ** (-2.2)) ** (1/-2.2)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(1, 1)
def test_sqrt_mag():
sqrt_mag = u.mag ** 0.5
assert hasattr(sqrt_mag.decompose().scale, 'imag')
assert (sqrt_mag.decompose())**2 == u.mag
def test_composite_compose():
# Issue #2382
composite_unit = u.s.compose(units=[u.Unit("s")])[0]
u.s.compose(units=[composite_unit])
def test_data_quantities():
assert u.byte.is_equivalent(u.bit)
def test_compare_with_none():
# Ensure that equality comparisons with `None` work, and don't
# raise exceptions. We are deliberately not using `is None` here
# because that doesn't trigger the bug. See #3108.
assert not (u.m == None) # noqa
assert u.m != None # noqa
def test_validate_power_detect_fraction():
frac = utils.validate_power(1.1666666666666665)
assert isinstance(frac, Fraction)
assert frac.numerator == 7
assert frac.denominator == 6
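# Illustrative sketch (added; not part of the original suite): the same
# promotion to Fraction also happens for other simple ratios such as 1/3,
# consistent with test_invalid_power above.
def _example_validate_power_third():
    frac = utils.validate_power(1. / 3.)
    assert frac == Fraction(1, 3)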
def test_complex_fractional_rounding_errors():
# See #3788
kappa = 0.34 * u.cm**2 / u.g
r_0 = 886221439924.7849 * u.cm
q = 1.75
rho_0 = 5e-10 * u.solMass / u.solRad**3
y = 0.5
beta = 0.19047619047619049
a = 0.47619047619047628
m_h = 1e6*u.solMass
t1 = 2 * c.c / (kappa * np.sqrt(np.pi))
t2 = (r_0**-q) / (rho_0 * y * beta * (a * c.G * m_h)**0.5)
result = ((t1 * t2)**-0.8)
assert result.unit.physical_type == 'length'
result.to(u.solRad)
def test_fractional_rounding_errors_simple():
x = (u.m ** 1.5) ** Fraction(4, 5)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 6
assert x.powers[0].denominator == 5
def test_enable_unit_groupings():
from astropy.units import cds
with cds.enable():
assert cds.geoMass in u.kg.find_equivalent_units()
from astropy.units import imperial
with imperial.enable():
assert imperial.inch in u.m.find_equivalent_units()
def test_unit_summary_prefixes():
"""
Test for a few units that the unit summary table correctly reports
whether or not that unit supports prefixes.
Regression test for https://github.com/astropy/astropy/issues/3835
"""
from astropy.units import astrophys
for summary in utils._iter_unit_summary(astrophys.__dict__):
unit, _, _, _, prefixes = summary
if unit.name == 'lyr':
assert prefixes
elif unit.name == 'pc':
assert prefixes
elif unit.name == 'barn':
assert prefixes
elif unit.name == 'cycle':
assert prefixes == 'No'
elif unit.name == 'spat':
assert prefixes == 'No'
elif unit.name == 'vox':
assert prefixes == 'Yes'
def test_raise_to_negative_power():
"""Test that order of bases is changed when raising to negative power.
Regression test for https://github.com/astropy/astropy/issues/8260
"""
m2s2 = u.m ** 2 / u.s ** 2
spm = m2s2 ** (-1 / 2)
assert spm.bases == [u.s, u.m]
assert spm.powers == [1, -1]
assert spm == u.s / u.m
|
c68ac88fdd25522d9173c81f9bcc48b811446a3acb13c8a22c45b0285dc54aa6 | # The purpose of these tests is to ensure that calling quantities using
# array methods returns quantities with the right units, or raises exceptions.
import sys
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_21_1, NUMPY_LT_1_22
class TestQuantityArrayCopy:
"""
Test whether arrays are properly copied/used in place
"""
def test_copy_on_creation(self):
v = np.arange(1000.)
q_nocopy = u.Quantity(v, "km/s", copy=False)
q_copy = u.Quantity(v, "km/s", copy=True)
v[0] = -1.
assert q_nocopy[0].value == v[0]
assert q_copy[0].value != v[0]
def test_to_copies(self):
q = u.Quantity(np.arange(1., 100.), "km/s")
q2 = q.to(u.m/u.s)
assert np.all(q.value != q2.value)
q3 = q.to(u.km/u.s)
assert np.all(q.value == q3.value)
q[0] = -1.*u.km/u.s
assert q[0].value != q3[0].value
def test_si_copies(self):
q = u.Quantity(np.arange(100.), "m/s")
q2 = q.si
assert np.all(q.value == q2.value)
q[0] = -1.*u.m/u.s
assert q[0].value != q2[0].value
def test_getitem_is_view(self):
"""Check that [keys] work, and that, like ndarray, it returns
a view, so that changing one changes the other.
Also test that one can add axes (closes #1422)
"""
q = u.Quantity(np.arange(100.), "m/s")
q_sel = q[10:20]
q_sel[0] = -1.*u.m/u.s
assert q_sel[0] == q[10]
# also check that getitem can do new axes
q2 = q[:, np.newaxis]
q2[10, 0] = -9*u.m/u.s
assert np.all(q2.flatten() == q)
def test_flat(self):
q = u.Quantity(np.arange(9.).reshape(3, 3), "m/s")
q_flat = q.flat
# check that a single item is a quantity (with the right value)
assert q_flat[8] == 8. * u.m / u.s
# and that getting a range works as well
assert np.all(q_flat[0:2] == np.arange(2.) * u.m / u.s)
# as well as getting items via iteration
q_flat_list = [_q for _q in q.flat]
assert np.all(u.Quantity(q_flat_list) ==
u.Quantity([_a for _a in q.value.flat], q.unit))
# check that flat works like a view of the real array
q_flat[8] = -1. * u.km / u.s
assert q_flat[8] == -1. * u.km / u.s
assert q[2, 2] == -1. * u.km / u.s
# while if one goes by an iterated item, a copy is made
q_flat_list[8] = -2 * u.km / u.s
assert q_flat_list[8] == -2. * u.km / u.s
assert q_flat[8] == -1. * u.km / u.s
assert q[2, 2] == -1. * u.km / u.s
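# Illustrative sketch (added; not part of the original suite): plain slices
# of a Quantity are views on the same memory, as test_getitem_is_view
# asserts above; np.may_share_memory makes the aliasing explicit.
def _example_slice_is_view():
    q = np.arange(10.) * u.m
    sub = q[2:5]
    sub[0] = -1. * u.m
    assert q[2] == -1. * u.m
    assert np.may_share_memory(sub, q)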
class TestQuantityReshapeFuncs:
"""Test different ndarray methods that alter the array shape
tests: reshape, squeeze, ravel, flatten, transpose, swapaxes
"""
def test_reshape(self):
q = np.arange(6.) * u.m
q_reshape = q.reshape(3, 2)
assert isinstance(q_reshape, u.Quantity)
assert q_reshape.unit == q.unit
assert np.all(q_reshape.value == q.value.reshape(3, 2))
def test_squeeze(self):
q = np.arange(6.).reshape(6, 1) * u.m
q_squeeze = q.squeeze()
assert isinstance(q_squeeze, u.Quantity)
assert q_squeeze.unit == q.unit
assert np.all(q_squeeze.value == q.value.squeeze())
def test_ravel(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_ravel = q.ravel()
assert isinstance(q_ravel, u.Quantity)
assert q_ravel.unit == q.unit
assert np.all(q_ravel.value == q.value.ravel())
def test_flatten(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_flatten = q.flatten()
assert isinstance(q_flatten, u.Quantity)
assert q_flatten.unit == q.unit
assert np.all(q_flatten.value == q.value.flatten())
def test_transpose(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_transpose = q.transpose()
assert isinstance(q_transpose, u.Quantity)
assert q_transpose.unit == q.unit
assert np.all(q_transpose.value == q.value.transpose())
def test_swapaxes(self):
q = np.arange(6.).reshape(3, 1, 2) * u.m
q_swapaxes = q.swapaxes(0, 2)
assert isinstance(q_swapaxes, u.Quantity)
assert q_swapaxes.unit == q.unit
assert np.all(q_swapaxes.value == q.value.swapaxes(0, 2))
@pytest.mark.xfail(sys.byteorder == 'big' and NUMPY_LT_1_21_1,
reason="Numpy GitHub Issue 19153")
def test_flat_attributes(self):
"""While ``flat`` doesn't make a copy, it changes the shape."""
q = np.arange(6.).reshape(3, 1, 2) * u.m
qf = q.flat
# flat shape is same as before reshaping
assert len(qf) == 6
# see TestQuantityArrayCopy.test_flat for tests of iteration
# and slicing and setting. Here we test the properties and methods to
# match `numpy.ndarray.flatiter`
assert qf.base is q
# testing the indices -- flat and full -- into the array
assert qf.coords == (0, 0, 0) # to start
assert qf.index == 0
# now consume the iterator
endindices = [(qf.index, qf.coords) for x in qf][-2] # next() oversteps
assert endindices[0] == 5
assert endindices[1] == (2, 0, 1) # shape of q - 1
# also check q_flat copies properly
q_flat_copy = qf.copy()
assert all(q_flat_copy == q.flatten())
assert isinstance(q_flat_copy, u.Quantity)
assert not np.may_share_memory(q_flat_copy, q)
class TestQuantityStatsFuncs:
"""
Test statistical functions
"""
def test_mean(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert_array_equal(np.mean(q1), 3.6 * u.m)
assert_array_equal(np.mean(q1, keepdims=True), [3.6] * u.m)
def test_mean_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
qi2 = np.mean(q1, out=qi)
assert qi2 is qi
assert qi == 3.6 * u.m
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
def test_mean_where(self):
q1 = np.array([1., 2., 4., 5., 6., 7.]) * u.m
assert_array_equal(np.mean(q1, where=q1 < 7 * u.m), 3.6 * u.m)
def test_std(self):
q1 = np.array([1., 2.]) * u.m
assert_array_equal(np.std(q1), 0.5 * u.m)
assert_array_equal(q1.std(axis=-1, keepdims=True), [0.5] * u.m)
def test_std_inplace(self):
q1 = np.array([1., 2.]) * u.m
qi = 1.5 * u.s
np.std(q1, out=qi)
assert qi == 0.5 * u.m
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
def test_std_where(self):
q1 = np.array([1., 2., 3.]) * u.m
assert_array_equal(np.std(q1, where=q1 < 3 * u.m), 0.5 * u.m)
def test_var(self):
q1 = np.array([1., 2.]) * u.m
assert_array_equal(np.var(q1), 0.25 * u.m ** 2)
assert_array_equal(q1.var(axis=0, keepdims=True), [0.25] * u.m ** 2)
def test_var_inplace(self):
q1 = np.array([1., 2.]) * u.m
qi = 1.5 * u.s
np.var(q1, out=qi)
assert qi == 0.25 * u.m ** 2
@pytest.mark.xfail(NUMPY_LT_1_20, reason="'where' keyword argument not supported for numpy < 1.20")
def test_var_where(self):
q1 = np.array([1., 2., 3.]) * u.m
assert_array_equal(np.var(q1, where=q1 < 3 * u.m), 0.25 * u.m ** 2)
def test_median(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.median(q1) == 4. * u.m
def test_median_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.median(q1, out=qi)
assert qi == 4 * u.m
def test_min(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.min(q1) == 1. * u.m
def test_min_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.min(q1, out=qi)
assert qi == 1. * u.m
def test_min_where(self):
q1 = np.array([0., 1., 2., 4., 5., 6.]) * u.m
assert np.min(q1, initial=10 * u.m, where=q1 > 0 * u.m) == 1. * u.m
def test_argmin(self):
q1 = np.array([6., 2., 4., 5., 6.]) * u.m
assert np.argmin(q1) == 1
def test_max(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.max(q1) == 6. * u.m
def test_max_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.max(q1, out=qi)
assert qi == 6. * u.m
def test_max_where(self):
q1 = np.array([1., 2., 4., 5., 6., 7.]) * u.m
assert np.max(q1, initial=0 * u.m, where=q1 < 7 * u.m) == 6. * u.m
def test_argmax(self):
q1 = np.array([5., 2., 4., 5., 6.]) * u.m
assert np.argmax(q1) == 4
def test_clip(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
c1 = q1.clip(1500, 5.5 * u.Mm / u.km)
assert np.all(c1 == np.array([1.5, 2., 4., 5., 5.5]) * u.km / u.m)
def test_clip_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
c1 = q1.clip(1500, 5.5 * u.Mm / u.km, out=q1)
assert np.all(q1 == np.array([1.5, 2., 4., 5., 5.5]) * u.km / u.m)
c1[0] = 10 * u.Mm/u.mm
assert np.all(c1.value == q1.value)
def test_conj(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
assert np.all(q1.conj() == q1)
def test_ptp(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.ptp(q1) == 5. * u.m
def test_ptp_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.ptp(q1, out=qi)
assert qi == 5. * u.m
def test_round(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
assert np.all(np.round(q1) == np.array([1, 2, 3]) * u.kg)
assert np.all(np.round(q1, decimals=2) ==
np.round(q1.value, decimals=2) * u.kg)
assert np.all(q1.round(decimals=2) ==
q1.value.round(decimals=2) * u.kg)
def test_round_inplace(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
qi = np.zeros(3) * u.s
a = q1.round(decimals=2, out=qi)
assert a is qi
assert np.all(q1.round(decimals=2) == qi)
def test_sum(self):
q1 = np.array([1., 2., 6.]) * u.m
assert np.all(q1.sum() == 9. * u.m)
assert np.all(np.sum(q1) == 9. * u.m)
q2 = np.array([[4., 5., 9.], [1., 1., 1.]]) * u.s
assert np.all(q2.sum(0) == np.array([5., 6., 10.]) * u.s)
assert np.all(np.sum(q2, 0) == np.array([5., 6., 10.]) * u.s)
def test_sum_inplace(self):
q1 = np.array([1., 2., 6.]) * u.m
qi = 1.5 * u.s
np.sum(q1, out=qi)
assert qi == 9. * u.m
def test_sum_where(self):
q1 = np.array([1., 2., 6., 7.]) * u.m
initial = 0 * u.m
where = q1 < 7 * u.m
assert np.all(q1.sum(initial=initial, where=where) == 9. * u.m)
assert np.all(np.sum(q1, initial=initial, where=where) == 9. * u.m)
def test_cumsum(self):
q1 = np.array([1, 2, 6]) * u.m
assert np.all(q1.cumsum() == np.array([1, 3, 9]) * u.m)
assert np.all(np.cumsum(q1) == np.array([1, 3, 9]) * u.m)
q2 = np.array([4, 5, 9]) * u.s
assert np.all(q2.cumsum() == np.array([4, 9, 18]) * u.s)
assert np.all(np.cumsum(q2) == np.array([4, 9, 18]) * u.s)
def test_cumsum_inplace(self):
q1 = np.array([1, 2, 6]) * u.m
qi = np.ones(3) * u.s
np.cumsum(q1, out=qi)
assert np.all(qi == np.array([1, 3, 9]) * u.m)
q2 = q1
q1.cumsum(out=q1)
assert np.all(q2 == qi)
def test_nansum(self):
q1 = np.array([1., 2., np.nan]) * u.m
assert np.all(q1.nansum() == 3. * u.m)
assert np.all(np.nansum(q1) == 3. * u.m)
q2 = np.array([[np.nan, 5., 9.], [1., np.nan, 1.]]) * u.s
assert np.all(q2.nansum(0) == np.array([1., 5., 10.]) * u.s)
assert np.all(np.nansum(q2, 0) == np.array([1., 5., 10.]) * u.s)
def test_nansum_inplace(self):
q1 = np.array([1., 2., np.nan]) * u.m
qi = 1.5 * u.s
qout = q1.nansum(out=qi)
assert qout is qi
assert qi == np.nansum(q1.value) * q1.unit
qi2 = 1.5 * u.s
qout2 = np.nansum(q1, out=qi2)
assert qout2 is qi2
assert qi2 == np.nansum(q1.value) * q1.unit
@pytest.mark.xfail(NUMPY_LT_1_22, reason="'where' keyword argument not supported for numpy < 1.22")
def test_nansum_where(self):
q1 = np.array([1., 2., np.nan, 4.]) * u.m
initial = 0 * u.m
where = q1 < 4 * u.m
assert np.all(q1.nansum(initial=initial, where=where) == 3. * u.m)
assert np.all(np.nansum(q1, initial=initial, where=where) == 3. * u.m)
def test_prod(self):
q1 = np.array([1, 2, 6]) * u.m
        with pytest.raises(u.UnitsError):
            q1.prod()
        with pytest.raises(u.UnitsError):
            np.prod(q1)
q2 = np.array([3., 4., 5.]) * u.Unit(1)
assert q2.prod() == 60. * u.Unit(1)
assert np.prod(q2) == 60. * u.Unit(1)
def test_cumprod(self):
q1 = np.array([1, 2, 6]) * u.m
        with pytest.raises(u.UnitsError):
            q1.cumprod()
        with pytest.raises(u.UnitsError):
            np.cumprod(q1)
q2 = np.array([3, 4, 5]) * u.Unit(1)
assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1))
assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1))
def test_diff(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
assert np.all(q1.diff() == np.array([1., 2., 6.]) * u.m)
assert np.all(np.diff(q1) == np.array([1., 2., 6.]) * u.m)
def test_ediff1d(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
assert np.all(q1.ediff1d() == np.array([1., 2., 6.]) * u.m)
assert np.all(np.ediff1d(q1) == np.array([1., 2., 6.]) * u.m)
def test_dot_meth(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
q2 = np.array([3., 4., 5., 6.]) * u.s
q3 = q1.dot(q2)
assert q3.value == np.dot(q1.value, q2.value)
assert q3.unit == u.m * u.s
def test_trace_func(self):
q = np.array([[1., 2.], [3., 4.]]) * u.m
assert np.trace(q) == 5. * u.m
def test_trace_meth(self):
q1 = np.array([[1., 2.], [3., 4.]]) * u.m
assert q1.trace() == 5. * u.m
cont = u.Quantity(4., u.s)
q2 = np.array([[3., 4.], [5., 6.]]) * u.m
q2.trace(out=cont)
assert cont == 9. * u.m
    def test_clip_func(self):
        q = np.arange(10) * u.m
        expected = np.array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) * u.m
        assert np.all(np.clip(q, 3 * u.m, 6 * u.m) == expected)
def test_clip_meth(self):
expected = np.array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) * u.m
q1 = np.arange(10) * u.m
        q3 = q1.clip(3 * u.m, 6 * u.m)
        assert np.all(q3 == expected)
cont = np.zeros(10) * u.s
q1.clip(3 * u.m, 6 * u.m, out=cont)
assert np.all(cont == expected)
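# Illustrative sketch (added; not part of the original suite): as the
# *_inplace tests above show, a reduction with ``out=`` overwrites both the
# values and the unit of the output Quantity.
def _example_out_unit_is_replaced():
    q = np.array([1., 2., 3.]) * u.m
    out = 0. * u.s  # deliberately wrong unit
    np.mean(q, out=out)
    assert out.unit == u.m  # unit was replaced by the result's unit
    assert out.value == 2.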
class TestArrayConversion:
"""
Test array conversion methods
"""
def test_item(self):
q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
assert q1.item(1) == 2 * q1.unit
q1.itemset(1, 1)
assert q1.item(1) == 1000 * u.m / u.km
q1.itemset(1, 100 * u.cm / u.km)
assert q1.item(1) == 1 * u.m / u.km
with pytest.raises(TypeError):
q1.itemset(1, 1.5 * u.m / u.km)
with pytest.raises(ValueError):
q1.itemset()
q1[1] = 1
assert q1[1] == 1000 * u.m / u.km
q1[1] = 100 * u.cm / u.km
assert q1[1] == 1 * u.m / u.km
with pytest.raises(TypeError):
q1[1] = 1.5 * u.m / u.km
def test_take_put(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
assert q1.take(1) == 2 * u.m / u.km
assert all(q1.take((0, 2)) == np.array([1, 3]) * u.m / u.km)
q1.put((1, 2), (3, 4))
assert np.all(q1.take((1, 2)) == np.array([3000, 4000]) * q1.unit)
q1.put(0, 500 * u.cm / u.km)
assert q1.item(0) == 5 * u.m / u.km
def test_slice(self):
"""Test that setitem changes the unit if needed (or ignores it for
values where that is allowed; viz., #2695)"""
q2 = np.array([[1., 2., 3.], [4., 5., 6.]]) * u.km / u.m
q1 = q2.copy()
q2[0, 0] = 10000.
assert q2.unit == q1.unit
assert q2[0, 0].value == 10.
q2[0] = 9. * u.Mm / u.km
assert all(q2.flatten()[:3].value == np.array([9., 9., 9.]))
q2[0, :-1] = 8000.
assert all(q2.flatten()[:3].value == np.array([8., 8., 9.]))
with pytest.raises(u.UnitsError):
q2[1, 1] = 10 * u.s
        # just to be sure, repeat with a dimensionful unit
q3 = u.Quantity(np.arange(10.), "m/s")
q3[5] = 100. * u.cm / u.s
assert q3[5].value == 1.
# and check unit is ignored for 0, inf, nan, where that is reasonable
q3[5] = 0.
assert q3[5] == 0.
q3[5] = np.inf
assert np.isinf(q3[5])
q3[5] = np.nan
assert np.isnan(q3[5])
def test_fill(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q1.fill(2)
assert np.all(q1 == 2000 * u.m / u.km)
def test_repeat_compress_diagonal(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q2 = q1.repeat(2)
assert q2.unit == q1.unit
assert all(q2.value == q1.value.repeat(2))
q2.sort()
assert q2.unit == q1.unit
q2 = q1.compress(np.array([True, True, False, False]))
assert q2.unit == q1.unit
assert all(q2.value == q1.value.compress(np.array([True, True,
False, False])))
q1 = np.array([[1, 2], [3, 4]]) * u.m / u.km
q2 = q1.diagonal()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.diagonal())
def test_view(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.view(np.ndarray)
assert not hasattr(q2, 'unit')
q3 = q2.view(u.Quantity)
assert q3._unit is None
# MaskedArray copies and properties assigned in __dict__
q4 = np.ma.MaskedArray(q1)
assert q4._unit is q1._unit
q5 = q4.view(u.Quantity)
assert q5.unit is q1.unit
def test_slice_to_quantity(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2003
"""
a = np.random.uniform(size=(10, 8))
x, y, z = a[:, 1:4].T * u.km/u.s
total = np.sum(a[:, 1] * u.km / u.s - x)
assert isinstance(total, u.Quantity)
assert total == (0.0 * u.km / u.s)
def test_byte_type_view_field_changes(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.byteswap()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.byteswap())
q2 = q1.astype(np.float64)
assert all(q2 == q1)
assert q2.dtype == np.float64
q2a = q1.getfield(np.int32, offset=0)
q2b = q1.byteswap().getfield(np.int32, offset=4)
assert q2a.unit == q1.unit
assert all(q2b.byteswap() == q2a)
def test_sort(self):
q1 = np.array([1., 5., 2., 4.]) * u.km / u.m
i = q1.argsort()
assert not hasattr(i, 'unit')
q1.sort()
i = q1.searchsorted([1500, 2500])
assert not hasattr(i, 'unit')
assert all(i == q1.to(
u.dimensionless_unscaled).value.searchsorted([1500, 2500]))
def test_not_implemented(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
with pytest.raises(NotImplementedError):
q1.choose([0, 0, 1])
with pytest.raises(NotImplementedError):
q1.tolist()
with pytest.raises(NotImplementedError):
q1.tostring()
with pytest.raises(NotImplementedError):
q1.tobytes()
with pytest.raises(NotImplementedError):
q1.tofile(0)
with pytest.raises(NotImplementedError):
q1.dump('a.a')
with pytest.raises(NotImplementedError):
q1.dumps()
class TestRecArray:
"""Record arrays are not specifically supported, but we should not
prevent their use unnecessarily"""
def setup(self):
self.ra = (np.array(np.arange(12.).reshape(4, 3))
.view(dtype=('f8,f8,f8')).squeeze())
def test_creation(self):
qra = u.Quantity(self.ra, u.m)
assert np.all(qra[:2].value == self.ra[:2])
def test_equality(self):
qra = u.Quantity(self.ra, u.m)
qra[1] = qra[2]
assert qra[1] == qra[2]
|
1393d7cabb6b36badfc5b6b2797acfc114cb74b954a105420e6657c3cf75ec45 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
import itertools
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)
class TestLogUnitCreation:
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize('lu_unit', lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
lu_subclasses + [u.LogUnit], pu_sample))
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit,
function_unit=2*lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_lshift_magnitude(self):
mag = 1. << u.ABmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.ABmag
assert mag.value == 1.
# same test for an array, which should produce a view
a2 = np.arange(10.)
q2 = a2 << u.ABmag
assert isinstance(q2, u.Magnitude)
assert q2.unit == u.ABmag
assert np.all(q2.value == a2)
a2[9] = 0.
assert np.all(q2.value == a2)
# a different magnitude unit
mag = 10. << u.STmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.STmag
assert mag.value == 10.
def test_ilshift_magnitude(self):
# test in-place operation and conversion
mag_fnu_cgs = u.mag(u.erg/u.s/u.cm**2/u.Hz)
m = np.arange(10.0) * u.mag(u.Jy)
jy = m.physical
m2 = m << mag_fnu_cgs
assert np.all(m2 == m.to(mag_fnu_cgs))
m2 = m
m <<= mag_fnu_cgs
assert m is m2 # Check it was done in-place!
assert np.all(m.value == m2.value)
assert m.unit == mag_fnu_cgs
# Check it works if equivalencies are in-place.
with u.add_enabled_equivalencies(u.spectral_density(5500*u.AA)):
st = jy.to(u.ST)
m <<= u.STmag
assert m is m2
assert_quantity_allclose(m.physical, st)
assert m.unit == u.STmag
def test_lshift_errors(self):
m = np.arange(10.0) * u.mag(u.Jy)
with pytest.raises(u.UnitsError):
m << u.STmag
with pytest.raises(u.UnitsError):
m << u.Jy
with pytest.raises(u.UnitsError):
m <<= u.STmag
with pytest.raises(u.UnitsError):
m <<= u.Jy
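# Illustrative sketch (added; not part of the original suite): the
# conversion factors asserted in test_logarithmic_units imply the familiar
# relations 1 dex = 10 dB and 1 dex = -2.5 mag.
def _example_log_unit_relations():
    assert u.dex.to(u.dB) == 10.
    assert u.dex.to(u.mag) == -2.5
    assert_allclose(u.mag.to(u.dex), -0.4)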
def test_predefined_magnitudes():
assert_quantity_allclose((-21.1*u.STmag).physical,
1.*u.erg/u.cm**2/u.s/u.AA)
assert_quantity_allclose((-48.6*u.ABmag).physical,
1.*u.erg/u.cm**2/u.s/u.Hz)
assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
assert_quantity_allclose((0*u.m_bol).physical,
c.L_bol0/(4.*np.pi*(10.*c.pc)**2))
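# Illustrative sketch (added; not part of the original suite): the AB zero
# point corresponds to roughly 3631 Jy; the numerical value and tolerance
# here are assumptions, not taken from the test suite.
def _example_ab_zero_point():
    assert_quantity_allclose((0. * u.ABmag).physical.to(u.Jy),
                             3631. * u.Jy, rtol=1e-3)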
def test_predefined_reinitialisation():
assert u.mag('STflux') == u.STmag
assert u.mag('ABflux') == u.ABmag
assert u.mag('Bol') == u.M_bol
assert u.mag('bol') == u.m_bol
# required for backwards-compatibility, at least unless deprecated
assert u.mag('ST') == u.STmag
assert u.mag('AB') == u.ABmag
def test_predefined_string_roundtrip():
"""Ensure round-tripping; see #5015"""
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regression for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings:
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == 'mag(Jy)'
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string('generic') == 'mag(Jy)'
with pytest.raises(ValueError):
lu1.to_string('fits')
with pytest.raises(ValueError):
lu1.to_string(format='cds')
lu2 = u.dex()
assert str(lu2) == 'dex'
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == 'dex(1)'
lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag)
assert str(lu3) == '2 mag(Jy)'
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == '2 mag(Jy)'
lu4 = u.mag(u.ct)
assert lu4.to_string('generic') == 'mag(ct)'
latex_str = r'$\mathrm{mag}$$\mathrm{\left( \mathrm{ct} \right)}$'
assert lu4.to_string('latex') == latex_str
assert lu4.to_string('latex_inline') == latex_str
assert lu4._repr_latex_() == latex_str
lu5 = u.mag(u.ct/u.s)
assert lu5.to_string('latex') == (r'$\mathrm{mag}$$\mathrm{\left( '
r'\mathrm{\frac{ct}{s}} \right)}$')
latex_str = (r'$\mathrm{mag}$$\mathrm{\left( \mathrm{ct\,s^{-1}} '
r'\right)}$')
assert lu5.to_string('latex_inline') == latex_str
class TestLogUnitConversion:
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.) == 1.
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.) == 0.
pu = u.Unit(8.*physical_unit)
        assert lu1.is_equivalent(pu)
assert lu1.to(pu, 0.) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15)
# Check we round-trip.
value = np.linspace(0., 10., 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize('lu_unit', lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0., 10., 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
'flu_unit, tlu_unit, physical_unit',
itertools.product(lu_units, lu_units, pu_sample))
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0., 10., 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(flu.to(tlu, values),
values * flu.function_unit.to(tlu.function_unit))
tlu2 = tlu_unit(u.Unit(100.*physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
def test_magnitude_conversion_fails_message(self):
"""Check that "dimensionless" magnitude units include a message in their
exception text suggesting a possible cause of the problem.
"""
with pytest.raises(u.UnitConversionError) as excinfo:
(10*u.ABmag - 2*u.ABmag).to(u.nJy)
assert "Did you perhaps subtract magnitudes so the unit got lost?" in str(excinfo.value)
class TestLogUnitArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.),
lu2.to(lu2.physical_unit, np.arange(3.)))
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
# For completeness, also ensure non-sensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1 ** power == u.dimensionless_unscaled
elif power == 1:
assert lu1 ** power == lu1
else:
with pytest.raises(u.UnitsError):
lu1 ** power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t**(1./power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)),
lu2.to(lu2.physical_unit, np.arange(3.)))
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.
with pytest.raises(TypeError):
lu1 - [1., 2., 3.]
@pytest.mark.parametrize(
'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg/u.s/u.AA)
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm/u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
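# Illustrative sketch, not part of the original suite: because function
# units hash consistently, they can serve as dictionary keys; the helper
# name _example_unit_as_dict_key is ours.
def _example_unit_as_dict_key():
    labels = {u.dB(u.mW): 'power in dBm', u.dex(u.cm / u.s**2): 'log g'}
    assert labels[u.dB(u.mW)] == 'power in dBm'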
class TestLogQuantityCreation:
@pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity],
lu_subclasses + [u.LogUnit]))
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.)) is lq
@pytest.mark.parametrize('lq_cls, physical_unit',
itertools.product(lq_subclasses, pu_sample))
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
        and do a basic check on the transformations."""
value = np.arange(1., 10.)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, 'function_unit', unit)
assert q.unit.physical_unit is getattr(unit, 'physical_unit',
u.dimensionless_unscaled)
@pytest.mark.parametrize('value, unit', (
(1.*u.mag(u.Jy), None),
(1.*u.dex(u.Jy), None),
(1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)),
(1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy))))
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(unit, 'physical_unit',
value.unit.physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100. * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.
assert (q2._function_view / u.mag).to_value(1) == -5.
q3 = unit / 0.4
assert q3 == q1
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100., 1000.] * u.cm/u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
        # But fine if the values already carry the right function unit (dex).
q = [2., 3.] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1., lu)
q = u.Quantity(1., lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10., 12., 14.] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.*u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews:
def setup(self):
self.lq = u.Magnitude(np.arange(1., 10.) * u.Jy)
self.lq2 = u.Magnitude(np.arange(1., 5.))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2. * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing:
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy)
assert lq1[9] == u.Magnitude(10.*u.Jy)
lq1[2] = 100.*u.Jy
assert lq1[2] == u.Magnitude(100.*u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.*u.m)
assert lq1[2] == u.Magnitude(100.*u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy)
lq1[2:4] = 100.*u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.*u.m)
assert np.all(lq1[2] == u.Magnitude(100.*u.Jy))
class TestLogQuantityArithmetic:
@pytest.mark.parametrize(
'other', [2.4 * u.mag(), 12.34 * u.ABmag,
u.Magnitude(3.45 * u.Jy), u.Dex(3.),
u.Dex(np.linspace(3000, 5000, 10) * u.Angstrom),
u.Magnitude(6.78, 2. * u.mag)])
@pytest.mark.parametrize('fac', [1., 2, 0.4])
def test_multiplication_division(self, other, fac):
"""Check that multiplication and division works as expectes"""
lq_sf = fac * other
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical ** fac)
lq_sf = other * fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical ** fac)
lq_sf = other / fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
lq_sf = other.copy()
lq_sf *= fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical ** fac)
lq_sf = other.copy()
lq_sf /= fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
def test_more_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this turns
the result into a normal quantity."""
lq = u.Magnitude(np.arange(1., 11.)*u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.*u.m)
with pytest.raises(u.UnitsError):
(1.*u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
lq_sf = lq.copy()
with pytest.raises(u.UnitsError):
lq_sf *= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
with pytest.raises(u.UnitsError):
lq_sf /= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value/2.)
# with dimensionless, normal units OK, but return normal quantities
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.*u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
if power == 0:
assert np.all(lq ** power == 1.)
elif power == 1:
assert np.all(lq ** power == lq)
else:
with pytest.raises(u.UnitsError):
lq ** power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit ** power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
with pytest.raises(TypeError):
lq ** lq
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
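    # Illustrative sketch, not part of the original suite: adding
    # logarithmic quantities multiplies their physical counterparts, since
    # mag(a) + mag(b) corresponds to -2.5 log10(a * b); the helper name
    # _example_mag_addition is ours.
    def _example_mag_addition(self):
        m1 = u.Magnitude(3. * u.Jy)
        m2 = 2. * u.mag()
        assert_quantity_allclose((m1 + m2).physical,
                                 m1.physical * m2.physical)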
@pytest.mark.parametrize('other', pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1., 10.), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
DMmag = u.mag(dm0)
m_st = 10. * u.STmag
dm = 5. * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg/u.s/u.AA)
assert np.abs(M_st.physical /
(m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) < 1.e-15
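# Illustrative sketch, not part of the original suite: the 'DM' unit used
# above implements the distance modulus, dm = 5 log10(d / 10 pc), because
# fluxes scale as 1 / (4 pi d**2); the helper name _example_distance_modulus
# is ours.
def _example_distance_modulus():
    dm0 = u.Unit('DM', 1. / (4. * np.pi * (10. * u.pc)**2))
    DMmag = u.mag(dm0)
    dm = (1. / (4. * np.pi * (100. * u.pc)**2)).to(DMmag)
    assert_quantity_allclose(dm, 5. * DMmag)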
class TestLogQuantityComparisons:
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
with pytest.raises(TypeError):
lq > 'a'
assert not (lq == 'a')
assert lq != 'a'
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy)
lq2 = u.Magnitude(2.*u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.*u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.*u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.*u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1., 4.))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.*u.m
class TestLogQuantityMethods:
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace',
'std', 'var', 'ptp', 'diff', 'ediff1d'))
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value ==
getattr(mag._function_view, method)().value)
if method in ('std', 'ptp', 'diff', 'ediff1d'):
assert res.unit == u.mag()
elif method == 'var':
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value ==
mag.value.clip(2., 4.))
@pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum'))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value ==
getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value ==
self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize('method', ('prod', 'cumprod'))
def test_never_ok(self, method):
with pytest.raises(TypeError):
getattr(self.mJy, method)()
with pytest.raises(TypeError):
getattr(self.m1, method)()
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
import sys
import typing as T
import numpy as np
import pytest
from astropy import units as u
from astropy.units._typing import HAS_ANNOTATED, Annotated
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
class TestQuantityTyping:
"""Test Quantity Typing Annotations."""
def test_quantity_typing(self):
"""Test type hint creation from Quantity."""
annot = u.Quantity[u.m]
assert T.get_origin(annot) is Annotated
assert T.get_args(annot) == (u.Quantity, u.m)
# test usage
def func(x: annot, y: str) -> u.Quantity[u.s]:
return x, y
annots = T.get_type_hints(func, include_extras=True)
assert annots["x"] is annot
assert annots["return"].__metadata__[0] == u.s
def test_metadata_in_annotation(self):
"""Test Quantity annotation with added metadata."""
multi_annot = u.Quantity[u.m, T.Any, np.dtype]
def multi_func(x: multi_annot, y: str):
return x, y
annots = T.get_type_hints(multi_func, include_extras=True)
assert annots["x"] == multi_annot
def test_optional_and_annotated(self):
"""Test Quantity annotation in an Optional."""
opt_annot = T.Optional[u.Quantity[u.m]]
def opt_func(x: opt_annot, y: str):
return x, y
annots = T.get_type_hints(opt_func, include_extras=True)
assert annots["x"] == opt_annot
def test_union_and_annotated(self):
"""Test Quantity annotation in a Union."""
# double Quantity[]
union_annot1 = T.Union[u.Quantity[u.m], u.Quantity[u.s]]
# one Quantity, one physical-type
union_annot2 = T.Union[u.Quantity[u.m], u.Quantity["time"]]
# one Quantity, one general type
union_annot3 = T.Union[u.Quantity[u.m / u.s], float]
def union_func(x: union_annot1, y: union_annot2) -> union_annot3:
if isinstance(y, str): # value = time
return x.value # returns <float>
else:
return x / y # returns Quantity[m / s]
annots = T.get_type_hints(union_func, include_extras=True)
assert annots["x"] == union_annot1
assert annots["y"] == union_annot2
assert annots["return"] == union_annot3
def test_quantity_subclass_typing(self):
"""Test type hint creation from a Quantity subclasses."""
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
annot = Length[u.km]
assert T.get_origin(annot) is Annotated
assert T.get_args(annot) == (Length, u.km)
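# Illustrative sketch, not part of the original suite: the unit stored in
# the Annotated metadata can be recovered at runtime, e.g. for input
# validation; the helper name _example_annotation_unit is ours and, like
# the class above, needs Python 3.9+ when called.
def _example_annotation_unit():
    annot = u.Quantity[u.km]
    quantity_cls, unit = T.get_args(annot)
    assert quantity_cls is u.Quantity and unit == u.km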
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities.
"""
import copy
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.tests.helper import check_pickling_recovery, pickle_protocol
from astropy.units import Quantity, StructuredUnit, Unit, UnitBase
from astropy.utils.compat import NUMPY_LT_1_21_1
from astropy.utils.masked import Masked
class StructuredTestBase:
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([('p', 'f8'), ('v', 'f8')])
self.pv_t_dtype = np.dtype([('pv', self.pv_dtype), ('t', 'f8')])
self.p_unit = u.km
self.v_unit = u.km / u.s
self.t_unit = u.s
self.pv = np.array([(1., 0.25), (2., 0.5), (3., 0.75)],
self.pv_dtype)
self.pv_t = np.array([((4., 2.5), 0.),
((5., 5.0), 1.),
((6., 7.5), 2.)], self.pv_t_dtype)
class StructuredTestBaseWithUnits(StructuredTestBase):
@classmethod
def setup_class(self):
super().setup_class()
self.pv_unit = StructuredUnit((self.p_unit, self.v_unit),
('p', 'v'))
self.pv_t_unit = StructuredUnit((self.pv_unit, self.t_unit),
('pv', 't'))
class TestStructuredUnitBasics(StructuredTestBase):
def test_initialization_and_keying(self):
su = StructuredUnit((self.p_unit, self.v_unit), ('p', 'v'))
assert su['p'] is self.p_unit
assert su['v'] is self.v_unit
su2 = StructuredUnit((su, self.t_unit), ('pv', 't'))
assert isinstance(su2['pv'], StructuredUnit)
assert su2['pv']['p'] is self.p_unit
assert su2['pv']['v'] is self.v_unit
assert su2['t'] is self.t_unit
assert su2['pv'] == su
su3 = StructuredUnit(('AU', 'AU/day'), ('p', 'v'))
assert isinstance(su3['p'], UnitBase)
assert isinstance(su3['v'], UnitBase)
su4 = StructuredUnit('AU, AU/day', ('p', 'v'))
assert su4['p'] == u.AU
assert su4['v'] == u.AU / u.day
su5 = StructuredUnit(('AU', 'AU/day'))
assert su5.field_names == ('f0', 'f1')
assert su5['f0'] == u.AU
assert su5['f1'] == u.AU / u.day
def test_recursive_initialization(self):
su = StructuredUnit(((self.p_unit, self.v_unit), self.t_unit),
(('p', 'v'), 't'))
assert isinstance(su['pv'], StructuredUnit)
assert su['pv']['p'] is self.p_unit
assert su['pv']['v'] is self.v_unit
assert su['t'] is self.t_unit
su2 = StructuredUnit(((self.p_unit, self.v_unit), self.t_unit),
(['p_v', ('p', 'v')], 't'))
assert isinstance(su2['p_v'], StructuredUnit)
assert su2['p_v']['p'] is self.p_unit
assert su2['p_v']['v'] is self.v_unit
assert su2['t'] is self.t_unit
su3 = StructuredUnit((('AU', 'AU/day'), 'yr'),
(['p_v', ('p', 'v')], 't'))
assert isinstance(su3['p_v'], StructuredUnit)
assert su3['p_v']['p'] == u.AU
assert su3['p_v']['v'] == u.AU / u.day
assert su3['t'] == u.yr
su4 = StructuredUnit('(AU, AU/day), yr', (('p', 'v'), 't'))
assert isinstance(su4['pv'], StructuredUnit)
assert su4['pv']['p'] == u.AU
assert su4['pv']['v'] == u.AU / u.day
assert su4['t'] == u.yr
def test_extreme_recursive_initialization(self):
su = StructuredUnit('(yr,(AU,AU/day,(km,(day,day))),m)',
('t', ('p', 'v', ('h', ('d1', 'd2'))), 'l'))
assert su.field_names == ('t', ['pvhd1d2',
('p', 'v',
['hd1d2',
('h',
['d1d2',
('d1', 'd2')])])], 'l')
@pytest.mark.parametrize('names, invalid', [
[('t', ['p', 'v']), "['p', 'v']"],
[('t', ['pv', 'p', 'v']), "['pv', 'p', 'v']"],
[('t', ['pv', ['p', 'v']]), "['pv', ['p', 'v']"],
[('t', ()), "()"],
[('t', ('p', None)), "None"],
[('t', ['pv', ('p', '')]), "''"]])
def test_initialization_names_invalid_list_errors(self, names, invalid):
with pytest.raises(ValueError) as exc:
StructuredUnit('(yr,(AU,AU/day)', names)
assert f'invalid entry {invalid}' in str(exc)
def test_looks_like_unit(self):
su = StructuredUnit((self.p_unit, self.v_unit), ('p', 'v'))
assert Unit(su) is su
def test_initialize_with_float_dtype(self):
su = StructuredUnit(('AU', 'AU/d'), self.pv_dtype)
assert isinstance(su['p'], UnitBase)
assert isinstance(su['v'], UnitBase)
assert su['p'] == u.AU
assert su['v'] == u.AU / u.day
su = StructuredUnit((('km', 'km/s'), 'yr'), self.pv_t_dtype)
assert isinstance(su['pv'], StructuredUnit)
assert isinstance(su['pv']['p'], UnitBase)
assert isinstance(su['t'], UnitBase)
assert su['pv']['v'] == u.km / u.s
su = StructuredUnit('(km, km/s), yr', self.pv_t_dtype)
assert isinstance(su['pv'], StructuredUnit)
assert isinstance(su['pv']['p'], UnitBase)
assert isinstance(su['t'], UnitBase)
assert su['pv']['v'] == u.km / u.s
def test_initialize_with_structured_unit_for_names(self):
su = StructuredUnit(('AU', 'AU/d'), names=('p', 'v'))
su2 = StructuredUnit(('km', 'km/s'), names=su)
assert su2.field_names == ('p', 'v')
assert su2['p'] == u.km
assert su2['v'] == u.km / u.s
def test_initialize_single_field(self):
su = StructuredUnit('AU', 'p')
assert isinstance(su, StructuredUnit)
assert isinstance(su['p'], UnitBase)
assert su['p'] == u.AU
su = StructuredUnit('AU')
assert isinstance(su, StructuredUnit)
assert isinstance(su['f0'], UnitBase)
assert su['f0'] == u.AU
def test_equality(self):
su = StructuredUnit(('AU', 'AU/d'), self.pv_dtype)
assert su == StructuredUnit(('AU', 'AU/d'), self.pv_dtype)
assert su != StructuredUnit(('m', 'AU/d'), self.pv_dtype)
# Names should be ignored.
assert su == StructuredUnit(('AU', 'AU/d'))
assert su == StructuredUnit(('AU', 'AU/d'), names=('q', 'w'))
assert su != StructuredUnit(('m', 'm/s'))
def test_parsing(self):
su = Unit('AU, AU/d')
assert isinstance(su, StructuredUnit)
assert isinstance(su['f0'], UnitBase)
assert isinstance(su['f1'], UnitBase)
assert su['f0'] == u.AU
assert su['f1'] == u.AU/u.day
su2 = Unit('AU, AU/d, yr')
assert isinstance(su2, StructuredUnit)
assert su2 == StructuredUnit(('AU', 'AU/d', 'yr'))
su2a = Unit('(AU, AU/d, yr)')
assert isinstance(su2a, StructuredUnit)
assert su2a == su2
su3 = Unit('(km, km/s), yr')
assert isinstance(su3, StructuredUnit)
assert su3 == StructuredUnit((('km', 'km/s'), 'yr'))
su4 = Unit('km,')
assert isinstance(su4, StructuredUnit)
assert su4 == StructuredUnit((u.km,))
su5 = Unit('(m,s),')
assert isinstance(su5, StructuredUnit)
assert su5 == StructuredUnit(((u.m, u.s),))
ldbody_unit = Unit('Msun, 0.5rad^2, (au, au/day)')
assert ldbody_unit == StructuredUnit(
(u.Msun, Unit(u.rad**2 / 2), (u.AU, u.AU / u.day)))
def test_to_string(self):
su = StructuredUnit((u.km, u.km/u.s))
latex_str = r'$(\mathrm{km}, \mathrm{\frac{km}{s}})$'
assert su.to_string(format='latex') == latex_str
latex_str = r'$(\mathrm{km}, \mathrm{km\,s^{-1}})$'
assert su.to_string(format='latex_inline') == latex_str
def test_str(self):
su = StructuredUnit(((u.km, u.km/u.s), u.yr))
assert str(su) == '((km, km / s), yr)'
assert Unit(str(su)) == su
def test_repr(self):
su = StructuredUnit(((u.km, u.km/u.s), u.yr))
assert repr(su) == 'Unit("((km, km / s), yr)")'
assert eval(repr(su)) == su
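# Illustrative sketch, not part of the original suite: the string grammar
# mirrors numpy's structured dtypes -- commas separate fields and
# parentheses nest sub-units; the helper name _example_structured_string
# is ours.
def _example_structured_string():
    su = StructuredUnit('(km, km/s), yr', (('p', 'v'), 't'))
    assert su['pv']['v'] == u.km / u.s and su['t'] == u.yr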
class TestStructuredUnitsCopyPickle(StructuredTestBaseWithUnits):
def test_copy(self):
su_copy = copy.copy(self.pv_t_unit)
assert su_copy is not self.pv_t_unit
assert su_copy == self.pv_t_unit
assert su_copy._units is self.pv_t_unit._units
def test_deepcopy(self):
su_copy = copy.deepcopy(self.pv_t_unit)
assert su_copy is not self.pv_t_unit
assert su_copy == self.pv_t_unit
assert su_copy._units is not self.pv_t_unit._units
@pytest.mark.skipif(NUMPY_LT_1_21_1, reason="https://stackoverflow.com/q/69571643")
def test_pickle(self, pickle_protocol):
check_pickling_recovery(self.pv_t_unit, pickle_protocol)
class TestStructuredUnitAsMapping(StructuredTestBaseWithUnits):
def test_len(self):
assert len(self.pv_unit) == 2
assert len(self.pv_t_unit) == 2
def test_keys(self):
slv = list(self.pv_t_unit.keys())
assert slv == ['pv', 't']
def test_values(self):
values = self.pv_t_unit.values()
assert values == (self.pv_unit, self.t_unit)
def test_field_names(self):
field_names = self.pv_t_unit.field_names
assert isinstance(field_names, tuple)
assert field_names == (['pv', ('p', 'v')], 't')
@pytest.mark.parametrize('iterable', [list, set])
def test_as_iterable(self, iterable):
sl = iterable(self.pv_unit)
assert isinstance(sl, iterable)
assert sl == iterable(['p', 'v'])
def test_as_dict(self):
sd = dict(self.pv_t_unit)
assert sd == {'pv': self.pv_unit, 't': self.t_unit}
def test_contains(self):
assert 'p' in self.pv_unit
assert 'v' in self.pv_unit
assert 't' not in self.pv_unit
def test_setitem_fails(self):
with pytest.raises(TypeError, match='item assignment'):
self.pv_t_unit['t'] = u.Gyr
class TestStructuredUnitMethods(StructuredTestBaseWithUnits):
def test_physical_type_id(self):
pv_ptid = self.pv_unit._get_physical_type_id()
assert len(pv_ptid) == 2
assert pv_ptid.dtype.names == ('p', 'v')
p_ptid = self.pv_unit['p']._get_physical_type_id()
v_ptid = self.pv_unit['v']._get_physical_type_id()
# Expected should be (subclass of) void, with structured object dtype.
expected = np.array((p_ptid, v_ptid), [('p', 'O'), ('v', 'O')])[()]
assert pv_ptid == expected
# Names should be ignored in comparison.
assert pv_ptid == np.array((p_ptid, v_ptid), 'O,O')[()]
# Should be possible to address by field and by number.
assert pv_ptid['p'] == p_ptid
assert pv_ptid['v'] == v_ptid
assert pv_ptid[0] == p_ptid
assert pv_ptid[1] == v_ptid
# More complicated version.
pv_t_ptid = self.pv_t_unit._get_physical_type_id()
t_ptid = self.t_unit._get_physical_type_id()
assert pv_t_ptid == np.array((pv_ptid, t_ptid), 'O,O')[()]
assert pv_t_ptid['pv'] == pv_ptid
assert pv_t_ptid['t'] == t_ptid
assert pv_t_ptid['pv'][1] == v_ptid
def test_physical_type(self):
pv_pt = self.pv_unit.physical_type
assert pv_pt == np.array(('length', 'speed'), 'O,O')[()]
pv_t_pt = self.pv_t_unit.physical_type
assert pv_t_pt == np.array((pv_pt, 'time'), 'O,O')[()]
def test_si(self):
pv_t_si = self.pv_t_unit.si
assert pv_t_si == self.pv_t_unit
assert pv_t_si['pv']['v'].scale == 1000
def test_cgs(self):
pv_t_cgs = self.pv_t_unit.cgs
assert pv_t_cgs == self.pv_t_unit
assert pv_t_cgs['pv']['v'].scale == 100000
def test_decompose(self):
pv_t_decompose = self.pv_t_unit.decompose()
assert pv_t_decompose['pv']['v'].scale == 1000
def test_is_equivalent(self):
assert self.pv_unit.is_equivalent(('AU', 'AU/day'))
assert not self.pv_unit.is_equivalent('m')
assert not self.pv_unit.is_equivalent(('AU', 'AU'))
# Names should be ignored.
pv_alt = StructuredUnit('m,m/s', names=('q', 'w'))
assert pv_alt.field_names != self.pv_unit.field_names
assert self.pv_unit.is_equivalent(pv_alt)
# Regular units should work too.
assert not u.m.is_equivalent(self.pv_unit)
def test_conversion(self):
pv1 = self.pv_unit.to(('AU', 'AU/day'), self.pv)
assert isinstance(pv1, np.ndarray)
assert pv1.dtype == self.pv.dtype
assert np.all(pv1['p'] * u.AU == self.pv['p'] * self.p_unit)
assert np.all(pv1['v'] * u.AU / u.day == self.pv['v'] * self.v_unit)
# Names should be from value.
su2 = StructuredUnit((self.p_unit, self.v_unit),
('position', 'velocity'))
pv2 = su2.to(('Mm', 'mm/s'), self.pv)
assert pv2.dtype.names == ('p', 'v')
assert pv2.dtype == self.pv.dtype
# Check recursion.
pv_t1 = self.pv_t_unit.to((('AU', 'AU/day'), 'Myr'), self.pv_t)
assert isinstance(pv_t1, np.ndarray)
assert pv_t1.dtype == self.pv_t.dtype
assert np.all(pv_t1['pv']['p'] * u.AU ==
self.pv_t['pv']['p'] * self.p_unit)
assert np.all(pv_t1['pv']['v'] * u.AU / u.day ==
self.pv_t['pv']['v'] * self.v_unit)
assert np.all(pv_t1['t'] * u.Myr == self.pv_t['t'] * self.t_unit)
# Passing in tuples should work.
pv_t2 = self.pv_t_unit.to((('AU', 'AU/day'), 'Myr'),
((1., 0.1), 10.))
assert pv_t2['pv']['p'] == self.p_unit.to('AU', 1.)
assert pv_t2['pv']['v'] == self.v_unit.to('AU/day', 0.1)
assert pv_t2['t'] == self.t_unit.to('Myr', 10.)
pv_t3 = self.pv_t_unit.to((('AU', 'AU/day'), 'Myr'),
[((1., 0.1), 10.),
((2., 0.2), 20.)])
assert np.all(pv_t3['pv']['p'] == self.p_unit.to('AU', [1., 2.]))
assert np.all(pv_t3['pv']['v'] == self.v_unit.to('AU/day', [0.1, 0.2]))
assert np.all(pv_t3['t'] == self.t_unit.to('Myr', [10., 20.]))
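# Illustrative sketch, not part of the original suite: StructuredUnit.to
# converts field by field, so a plain tuple comes back as a structured
# value with each field rescaled; the helper name _example_structured_to
# is ours.
def _example_structured_to():
    su = StructuredUnit(('km', 'km/s'), ('p', 'v'))
    out = su.to(('m', 'm/s'), (1., 2.))
    assert out['p'] == 1000. and out['v'] == 2000.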
class TestStructuredUnitArithmetic(StructuredTestBaseWithUnits):
def test_multiplication(self):
pv_times_au = self.pv_unit * u.au
assert isinstance(pv_times_au, StructuredUnit)
assert pv_times_au.field_names == ('p', 'v')
assert pv_times_au['p'] == self.p_unit * u.AU
assert pv_times_au['v'] == self.v_unit * u.AU
au_times_pv = u.au * self.pv_unit
assert au_times_pv == pv_times_au
pv_times_au2 = self.pv_unit * 'au'
assert pv_times_au2 == pv_times_au
au_times_pv2 = 'AU' * self.pv_unit
assert au_times_pv2 == pv_times_au
with pytest.raises(TypeError):
self.pv_unit * self.pv_unit
with pytest.raises(TypeError):
's,s' * self.pv_unit
def test_division(self):
pv_by_s = self.pv_unit / u.s
assert isinstance(pv_by_s, StructuredUnit)
assert pv_by_s.field_names == ('p', 'v')
assert pv_by_s['p'] == self.p_unit / u.s
assert pv_by_s['v'] == self.v_unit / u.s
pv_by_s2 = self.pv_unit / 's'
assert pv_by_s2 == pv_by_s
with pytest.raises(TypeError):
1. / self.pv_unit
with pytest.raises(TypeError):
u.s / self.pv_unit
class TestStructuredQuantity(StructuredTestBaseWithUnits):
def test_initialization_and_keying(self):
q_pv = Quantity(self.pv, self.pv_unit)
q_p = q_pv['p']
assert isinstance(q_p, Quantity)
assert isinstance(q_p.unit, UnitBase)
assert np.all(q_p == self.pv['p'] * self.pv_unit['p'])
q_v = q_pv['v']
assert isinstance(q_v, Quantity)
assert isinstance(q_v.unit, UnitBase)
assert np.all(q_v == self.pv['v'] * self.pv_unit['v'])
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_t = q_pv_t['t']
assert np.all(q_t == self.pv_t['t'] * self.pv_t_unit['t'])
q_pv2 = q_pv_t['pv']
assert isinstance(q_pv2, Quantity)
assert q_pv2.unit == self.pv_unit
with pytest.raises(ValueError):
Quantity(self.pv, self.pv_t_unit)
with pytest.raises(ValueError):
Quantity(self.pv_t, self.pv_unit)
def test_initialization_with_unit_tuples(self):
q_pv_t = Quantity(self.pv_t, (('km', 'km/s'), 's'))
assert isinstance(q_pv_t.unit, StructuredUnit)
assert q_pv_t.unit == self.pv_t_unit
def test_initialization_with_string(self):
q_pv_t = Quantity(self.pv_t, '(km, km/s), s')
assert isinstance(q_pv_t.unit, StructuredUnit)
assert q_pv_t.unit == self.pv_t_unit
def test_initialization_by_multiplication_with_unit(self):
q_pv_t = self.pv_t * self.pv_t_unit
assert q_pv_t.unit is self.pv_t_unit
assert np.all(q_pv_t.value == self.pv_t)
assert not np.may_share_memory(q_pv_t, self.pv_t)
q_pv_t2 = self.pv_t_unit * self.pv_t
        assert q_pv_t2.unit is self.pv_t_unit
# Not testing equality of structured Quantity here.
assert np.all(q_pv_t2.value == q_pv_t.value)
def test_initialization_by_shifting_to_unit(self):
q_pv_t = self.pv_t << self.pv_t_unit
assert q_pv_t.unit is self.pv_t_unit
assert np.all(q_pv_t.value == self.pv_t)
assert np.may_share_memory(q_pv_t, self.pv_t)
def test_getitem(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t01 = q_pv_t[:2]
assert isinstance(q_pv_t01, Quantity)
assert q_pv_t01.unit == q_pv_t.unit
assert np.all(q_pv_t01['t'] == q_pv_t['t'][:2])
q_pv_t1 = q_pv_t[1]
assert isinstance(q_pv_t1, Quantity)
assert q_pv_t1.unit == q_pv_t.unit
assert q_pv_t1.shape == ()
assert q_pv_t1['t'] == q_pv_t['t'][1]
def test_value(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
value = q_pv_t.value
assert type(value) is np.ndarray
assert np.all(value == self.pv_t)
value1 = q_pv_t[1].value
assert type(value1) is np.void
assert np.all(value1 == self.pv_t[1])
def test_conversion(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv.to(('AU', 'AU/day'))
assert isinstance(q1, Quantity)
assert q1['p'].unit == u.AU
assert q1['v'].unit == u.AU / u.day
assert np.all(q1['p'] == q_pv['p'].to(u.AU))
assert np.all(q1['v'] == q_pv['v'].to(u.AU/u.day))
q2 = q_pv.to(self.pv_unit)
assert q2['p'].unit == self.p_unit
assert q2['v'].unit == self.v_unit
assert np.all(q2['p'].value == self.pv['p'])
assert np.all(q2['v'].value == self.pv['v'])
assert not np.may_share_memory(q2, q_pv)
pv1 = q_pv.to_value(('AU', 'AU/day'))
assert type(pv1) is np.ndarray
assert np.all(pv1['p'] == q_pv['p'].to_value(u.AU))
assert np.all(pv1['v'] == q_pv['v'].to_value(u.AU/u.day))
pv11 = q_pv[1].to_value(('AU', 'AU/day'))
assert type(pv11) is np.void
assert pv11 == pv1[1]
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t.to((('kpc', 'kpc/Myr'), 'Myr'))
assert q2['pv']['p'].unit == u.kpc
assert q2['pv']['v'].unit == u.kpc / u.Myr
assert q2['t'].unit == u.Myr
assert np.all(q2['pv']['p'] == q_pv_t['pv']['p'].to(u.kpc))
assert np.all(q2['pv']['v'] == q_pv_t['pv']['v'].to(u.kpc/u.Myr))
assert np.all(q2['t'] == q_pv_t['t'].to(u.Myr))
def test_conversion_via_lshift(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv << StructuredUnit(('AU', 'AU/day'))
assert isinstance(q1, Quantity)
assert q1['p'].unit == u.AU
assert q1['v'].unit == u.AU / u.day
assert np.all(q1['p'] == q_pv['p'].to(u.AU))
assert np.all(q1['v'] == q_pv['v'].to(u.AU/u.day))
q2 = q_pv << self.pv_unit
assert q2['p'].unit == self.p_unit
assert q2['v'].unit == self.v_unit
assert np.all(q2['p'].value == self.pv['p'])
assert np.all(q2['v'].value == self.pv['v'])
assert np.may_share_memory(q2, q_pv)
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t << '(kpc,kpc/Myr),Myr'
assert q2['pv']['p'].unit == u.kpc
assert q2['pv']['v'].unit == u.kpc / u.Myr
assert q2['t'].unit == u.Myr
assert np.all(q2['pv']['p'] == q_pv_t['pv']['p'].to(u.kpc))
assert np.all(q2['pv']['v'] == q_pv_t['pv']['v'].to(u.kpc/u.Myr))
assert np.all(q2['t'] == q_pv_t['t'].to(u.Myr))
def test_inplace_conversion(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv.copy()
q_link = q1
q1 <<= StructuredUnit(('AU', 'AU/day'))
assert q1 is q_link
assert q1['p'].unit == u.AU
assert q1['v'].unit == u.AU / u.day
assert np.all(q1['p'] == q_pv['p'].to(u.AU))
assert np.all(q1['v'] == q_pv['v'].to(u.AU/u.day))
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t.copy()
q_link = q2
q2 <<= '(kpc,kpc/Myr),Myr'
assert q2 is q_link
assert q2['pv']['p'].unit == u.kpc
assert q2['pv']['v'].unit == u.kpc / u.Myr
assert q2['t'].unit == u.Myr
assert np.all(q2['pv']['p'] == q_pv_t['pv']['p'].to(u.kpc))
assert np.all(q2['pv']['v'] == q_pv_t['pv']['v'].to(u.kpc/u.Myr))
assert np.all(q2['t'] == q_pv_t['t'].to(u.Myr))
def test_si(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t_si = q_pv_t.si
assert_array_equal(q_pv_t_si, q_pv_t.to('(m,m/s),s'))
def test_cgs(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t_cgs = q_pv_t.cgs
assert_array_equal(q_pv_t_cgs, q_pv_t.to('(cm,cm/s),s'))
def test_equality(self):
q_pv = Quantity(self.pv, self.pv_unit)
equal = q_pv == q_pv
not_equal = q_pv != q_pv
assert np.all(equal)
assert not np.any(not_equal)
equal2 = q_pv == q_pv[1]
not_equal2 = q_pv != q_pv[1]
assert np.all(equal2 == [False, True, False])
assert np.all(not_equal2 != equal2)
q1 = q_pv.to(('AU', 'AU/day'))
# Ensure same conversion is done, by placing q1 first.
assert np.all(q1 == q_pv)
assert not np.any(q1 != q_pv)
# Check different names in dtype.
assert np.all(q1.value * u.Unit('AU, AU/day') == q_pv)
assert not np.any(q1.value * u.Unit('AU, AU/day') != q_pv)
assert (q_pv == 'b') is False
assert ('b' != q_pv) is True
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
assert np.all((q_pv_t[2] == q_pv_t) == [False, False, True])
assert np.all((q_pv_t[2] != q_pv_t) != [False, False, True])
assert (q_pv == q_pv_t) is False
assert (q_pv_t != q_pv) is True
def test_setitem(self):
q_pv = Quantity(self.pv, self.pv_unit)
q_pv[1] = (2., 2.) * self.pv_unit
assert q_pv[1].value == np.array((2., 2.), self.pv_dtype)
q_pv[1:2] = (1., 0.5) * u.Unit('AU, AU/day')
assert q_pv['p'][1] == 1. * u.AU
assert q_pv['v'][1] == 0.5 * u.AU / u.day
q_pv['v'] = 1. * u.km / u.s
assert np.all(q_pv['v'] == 1. * u.km / u.s)
with pytest.raises(u.UnitsError):
q_pv[1] = (1., 1.) * u.Unit('AU, AU')
with pytest.raises(u.UnitsError):
q_pv['v'] = 1. * u.km
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t[1] = ((2., 2.), 3.) * self.pv_t_unit
assert q_pv_t[1].value == np.array(((2., 2.), 3.), self.pv_t_dtype)
q_pv_t[1:2] = ((1., 0.5), 5.) * u.Unit('(AU, AU/day), yr')
assert q_pv_t['pv'][1] == (1., 0.5) * u.Unit('AU, AU/day')
assert q_pv_t['t'][1] == 5. * u.yr
q_pv_t['pv'] = (1., 0.5) * self.pv_unit
assert np.all(q_pv_t['pv'] == (1., 0.5) * self.pv_unit)
class TestStructuredQuantityFunctions(StructuredTestBaseWithUnits):
@classmethod
def setup_class(self):
super().setup_class()
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_empty_like(self):
z = np.empty_like(self.q_pv)
assert z.dtype == self.pv_dtype
assert z.unit == self.pv_unit
assert z.shape == self.pv.shape
@pytest.mark.parametrize('func', [np.zeros_like, np.ones_like])
def test_zeros_ones_like(self, func):
z = func(self.q_pv)
assert z.dtype == self.pv_dtype
assert z.unit == self.pv_unit
assert z.shape == self.pv.shape
assert_array_equal(z, func(self.pv) << self.pv_unit)
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'km / s'"):
rfn.structured_to_unstructured(self.q_pv)
# For the other tests of ``structured_to_unstructured``, see
# ``test_quantity_non_ufuncs.TestRecFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
# can't structure something that's already structured
dtype = np.dtype([("f1", float), ("f2", float)])
with pytest.raises(ValueError, match="The length of the last dimension"):
rfn.unstructured_to_structured(self.q_pv, dtype=self.q_pv.dtype)
        # For the other tests of ``unstructured_to_structured``, see
# ``test_quantity_non_ufuncs.TestRecFunctions.test_unstructured_to_structured``
class TestStructuredSpecificTypeQuantity(StructuredTestBaseWithUnits):
def setup_class(self):
super().setup_class()
class PositionVelocity(u.SpecificTypeQuantity):
_equivalent_unit = self.pv_unit
self.PositionVelocity = PositionVelocity
def test_init(self):
pv = self.PositionVelocity(self.pv, self.pv_unit)
assert isinstance(pv, self.PositionVelocity)
assert type(pv['p']) is u.Quantity
assert_array_equal(pv['p'], self.pv['p'] << self.pv_unit['p'])
pv2 = self.PositionVelocity(self.pv, 'AU,AU/day')
assert_array_equal(pv2['p'], self.pv['p'] << u.AU)
def test_error_on_non_equivalent_unit(self):
with pytest.raises(u.UnitsError):
self.PositionVelocity(self.pv, 'AU')
with pytest.raises(u.UnitsError):
self.PositionVelocity(self.pv, 'AU,yr')
class TestStructuredLogUnit:
def setup_class(self):
self.mag_time_dtype = np.dtype([('mag', 'f8'), ('t', 'f8')])
self.mag_time = np.array([(20., 10.), (25., 100.)], self.mag_time_dtype)
def test_unit_initialization(self):
mag_time_unit = StructuredUnit((u.STmag, u.s), self.mag_time_dtype)
assert mag_time_unit['mag'] == u.STmag
assert mag_time_unit['t'] == u.s
mag_time_unit2 = u.Unit('mag(ST),s')
assert mag_time_unit2 == mag_time_unit
def test_quantity_initialization(self):
su = u.Unit('mag(ST),s')
mag_time = self.mag_time << su
assert isinstance(mag_time['mag'], u.Magnitude)
assert isinstance(mag_time['t'], u.Quantity)
assert mag_time.unit == su
assert_array_equal(mag_time['mag'], self.mag_time['mag'] << u.STmag)
assert_array_equal(mag_time['t'], self.mag_time['t'] << u.s)
def test_quantity_si(self):
mag_time = self.mag_time << u.Unit('mag(ST),yr')
mag_time_si = mag_time.si
assert_array_equal(mag_time_si['mag'], mag_time['mag'].si)
assert_array_equal(mag_time_si['t'], mag_time['t'].si)
class TestStructuredMaskedQuantity(StructuredTestBaseWithUnits):
"""Somewhat minimal tests. Conversion is most stringent."""
def setup_class(self):
super().setup_class()
self.qpv = self.pv << self.pv_unit
self.pv_mask = np.array([(True, False),
(False, False),
(False, True)], [('p', bool), ('v', bool)])
self.mpv = Masked(self.qpv, mask=self.pv_mask)
def test_init(self):
assert isinstance(self.mpv, Masked)
assert isinstance(self.mpv, Quantity)
assert_array_equal(self.mpv.unmasked, self.qpv)
assert_array_equal(self.mpv.mask, self.pv_mask)
def test_slicing(self):
mp = self.mpv['p']
assert isinstance(mp, Masked)
assert isinstance(mp, Quantity)
assert_array_equal(mp.unmasked, self.qpv['p'])
assert_array_equal(mp.mask, self.pv_mask['p'])
def test_conversion(self):
mpv = self.mpv.to('AU,AU/day')
assert isinstance(mpv, Masked)
assert isinstance(mpv, Quantity)
assert_array_equal(mpv.unmasked, self.qpv.to('AU,AU/day'))
assert_array_equal(mpv.mask, self.pv_mask)
assert np.all(mpv == self.mpv)
def test_si(self):
mpv = self.mpv.si
assert isinstance(mpv, Masked)
assert isinstance(mpv, Quantity)
assert_array_equal(mpv.unmasked, self.qpv.si)
assert_array_equal(mpv.mask, self.pv_mask)
assert np.all(mpv == self.mpv)
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
import copy
import decimal
import numbers
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from astropy import units as u
from astropy.units.quantity import _UNIT_NOT_INITIALISED
from astropy.utils import isiterable, minversion
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
""" The Quantity class will represent a number + unit + uncertainty """
class TestQuantityCreation:
def test_1(self):
# create objects through operations with Unit objects:
quantity = 11.42 * u.meter # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = u.meter * 11.42 # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = 11.42 / u.meter
assert isinstance(quantity, u.Quantity)
quantity = u.meter / 11.42
assert isinstance(quantity, u.Quantity)
quantity = 11.42 * u.meter / u.second
assert isinstance(quantity, u.Quantity)
with pytest.raises(TypeError):
quantity = 182.234 + u.meter
with pytest.raises(TypeError):
quantity = 182.234 - u.meter
with pytest.raises(TypeError):
quantity = 182.234 % u.meter
def test_2(self):
# create objects using the Quantity constructor:
_ = u.Quantity(11.412, unit=u.meter)
_ = u.Quantity(21.52, "cm")
q3 = u.Quantity(11.412)
# By default quantities that don't specify a unit are unscaled
# dimensionless
assert q3.unit == u.Unit(1)
with pytest.raises(TypeError):
u.Quantity(object(), unit=u.m)
def test_3(self):
# with pytest.raises(u.UnitsError):
with pytest.raises(ValueError): # Until @mdboom fixes the errors in units
u.Quantity(11.412, unit="testingggg")
def test_nan_inf(self):
# Not-a-number
q = u.Quantity('nan', unit='cm')
assert np.isnan(q.value)
q = u.Quantity('NaN', unit='cm')
assert np.isnan(q.value)
q = u.Quantity('-nan', unit='cm') # float() allows this
assert np.isnan(q.value)
q = u.Quantity('nan cm')
assert np.isnan(q.value)
assert q.unit == u.cm
# Infinity
q = u.Quantity('inf', unit='cm')
assert np.isinf(q.value)
q = u.Quantity('-inf', unit='cm')
assert np.isinf(q.value)
q = u.Quantity('inf cm')
assert np.isinf(q.value)
assert q.unit == u.cm
q = u.Quantity('Infinity', unit='cm') # float() allows this
assert np.isinf(q.value)
# make sure these strings don't parse...
with pytest.raises(TypeError):
q = u.Quantity('', unit='cm')
with pytest.raises(TypeError):
q = u.Quantity('spam', unit='cm')
def test_unit_property(self):
# test getting and setting 'unit' attribute
q1 = u.Quantity(11.4, unit=u.meter)
with pytest.raises(AttributeError):
q1.unit = u.cm
def test_preserve_dtype(self):
"""Test that if an explicit dtype is given, it is used, while if not,
numbers are converted to float (including decimal.Decimal, which
numpy converts to an object; closes #1419)
"""
# If dtype is specified, use it, but if not, convert int, bool to float
q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)
assert q1.dtype == int
q2 = u.Quantity(q1)
assert q2.dtype == float
assert q2.value == float(q1.value)
assert q2.unit == q1.unit
# but we should preserve any float32 or even float16
a3_32 = np.array([1., 2.], dtype=np.float32)
q3_32 = u.Quantity(a3_32, u.yr)
assert q3_32.dtype == a3_32.dtype
a3_16 = np.array([1., 2.], dtype=np.float16)
q3_16 = u.Quantity(a3_16, u.yr)
assert q3_16.dtype == a3_16.dtype
# items stored as objects by numpy should be converted to float
# by default
q4 = u.Quantity(decimal.Decimal('10.25'), u.m)
assert q4.dtype == float
q5 = u.Quantity(decimal.Decimal('10.25'), u.m, dtype=object)
assert q5.dtype == object
def test_copy(self):
# By default, a new quantity is constructed, but not if copy=False
a = np.arange(10.)
q0 = u.Quantity(a, unit=u.m / u.s)
assert q0.base is not a
q1 = u.Quantity(a, unit=u.m / u.s, copy=False)
assert q1.base is a
q2 = u.Quantity(q0)
assert q2 is not q0
assert q2.base is not q0.base
q2 = u.Quantity(q0, copy=False)
assert q2 is q0
assert q2.base is q0.base
q3 = u.Quantity(q0, q0.unit, copy=False)
assert q3 is q0
assert q3.base is q0.base
q4 = u.Quantity(q0, u.cm / u.s, copy=False)
assert q4 is not q0
assert q4.base is not q0.base
def test_subok(self):
"""Test subok can be used to keep class, or to insist on Quantity"""
class MyQuantitySubclass(u.Quantity):
pass
myq = MyQuantitySubclass(np.arange(10.), u.m)
# try both with and without changing the unit
assert type(u.Quantity(myq)) is u.Quantity
assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass
assert type(u.Quantity(myq, u.km)) is u.Quantity
assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass
def test_order(self):
"""Test that order is correctly propagated to np.array"""
ac = np.array(np.arange(10.), order='C')
qcc = u.Quantity(ac, u.m, order='C')
assert qcc.flags['C_CONTIGUOUS']
qcf = u.Quantity(ac, u.m, order='F')
assert qcf.flags['F_CONTIGUOUS']
qca = u.Quantity(ac, u.m, order='A')
assert qca.flags['C_CONTIGUOUS']
# check it works also when passing in a quantity
assert u.Quantity(qcc, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='A').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='F').flags['F_CONTIGUOUS']
af = np.array(np.arange(10.), order='F')
qfc = u.Quantity(af, u.m, order='C')
assert qfc.flags['C_CONTIGUOUS']
        qff = u.Quantity(af, u.m, order='F')
assert qff.flags['F_CONTIGUOUS']
qfa = u.Quantity(af, u.m, order='A')
assert qfa.flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qff, order='A').flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='F').flags['F_CONTIGUOUS']
def test_ndmin(self):
"""Test that ndmin is correctly propagated to np.array"""
a = np.arange(10.)
q1 = u.Quantity(a, u.m, ndmin=1)
assert q1.ndim == 1 and q1.shape == (10,)
q2 = u.Quantity(a, u.m, ndmin=2)
assert q2.ndim == 2 and q2.shape == (1, 10)
# check it works also when passing in a quantity
q3 = u.Quantity(q1, u.m, ndmin=3)
assert q3.ndim == 3 and q3.shape == (1, 1, 10)
# see github issue #10063
assert u.Quantity(u.Quantity(1, 'm'), 'm', ndmin=1).ndim == 1
assert u.Quantity(u.Quantity(1, 'cm'), 'm', ndmin=1).ndim == 1
def test_non_quantity_with_unit(self):
"""Test that unit attributes in objects get recognized."""
class MyQuantityLookalike(np.ndarray):
pass
a = np.arange(3.)
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = 'm'
q1 = u.Quantity(mylookalike)
assert isinstance(q1, u.Quantity)
assert q1.unit is u.m
assert np.all(q1.value == a)
q2 = u.Quantity(mylookalike, u.mm)
assert q2.unit is u.mm
assert np.all(q2.value == 1000.*a)
q3 = u.Quantity(mylookalike, copy=False)
assert np.all(q3.value == mylookalike)
q3[2] = 0
assert q3[2] == 0.
assert mylookalike[2] == 0.
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = u.m
q4 = u.Quantity(mylookalike, u.mm, copy=False)
q4[2] = 0
assert q4[2] == 0.
assert mylookalike[2] == 2.
mylookalike.unit = 'nonsense'
with pytest.raises(TypeError):
u.Quantity(mylookalike)
def test_creation_via_view(self):
# This works but is no better than 1. * u.m
q1 = 1. << u.m
assert isinstance(q1, u.Quantity)
assert q1.unit == u.m
assert q1.value == 1.
# With an array, we get an actual view.
a2 = np.arange(10.)
q2 = a2 << u.m / u.s
assert isinstance(q2, u.Quantity)
assert q2.unit == u.m / u.s
assert np.all(q2.value == a2)
a2[9] = 0.
assert np.all(q2.value == a2)
# But with a unit change we get a copy.
q3 = q2 << u.mm / u.s
assert isinstance(q3, u.Quantity)
assert q3.unit == u.mm / u.s
assert np.all(q3.value == a2 * 1000.)
a2[8] = 0.
assert q3[8].value == 8000.
# Without a unit change, we do get a view.
q4 = q2 << q2.unit
a2[7] = 0.
assert np.all(q4.value == a2)
with pytest.raises(u.UnitsError):
q2 << u.s
# But one can do an in-place unit change.
a2_copy = a2.copy()
q2 <<= u.mm / u.s
assert q2.unit == u.mm / u.s
# Of course, this changes a2 as well.
assert np.all(q2.value == a2)
# Sanity check on the values.
assert np.all(q2.value == a2_copy * 1000.)
a2[8] = -1.
# Using quantities, one can also work with strings.
q5 = q2 << 'km/hr'
assert q5.unit == u.km / u.hr
assert np.all(q5 == q2)
# Finally, we can use scalar quantities as units.
not_quite_a_foot = 30. * u.cm
a6 = np.arange(5.)
q6 = a6 << not_quite_a_foot
assert q6.unit == u.Unit(not_quite_a_foot)
assert np.all(q6.to_value(u.cm) == 30. * a6)
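        # In short: ``<<`` attaches a unit as a view where possible (no unit
        # change needed) and falls back to a converting copy otherwise, while
        # ``<<=`` converts in place.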
def test_rshift_warns(self):
with pytest.raises(TypeError), \
pytest.warns(AstropyWarning, match='is not implemented') as warning_lines:
1 >> u.m
assert len(warning_lines) == 1
q = 1. * u.km
with pytest.raises(TypeError), \
pytest.warns(AstropyWarning, match='is not implemented') as warning_lines:
q >> u.m
assert len(warning_lines) == 1
with pytest.raises(TypeError), \
pytest.warns(AstropyWarning, match='is not implemented') as warning_lines:
q >>= u.m
assert len(warning_lines) == 1
with pytest.raises(TypeError), \
pytest.warns(AstropyWarning, match='is not implemented') as warning_lines:
1. >> q
assert len(warning_lines) == 1
class TestQuantityOperations:
q1 = u.Quantity(11.42, u.meter)
q2 = u.Quantity(8.0, u.centimeter)
def test_addition(self):
# Take units from left object, q1
new_quantity = self.q1 + self.q2
assert new_quantity.value == 11.5
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 + self.q1
assert new_quantity.value == 1150.0
assert new_quantity.unit == u.centimeter
new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
assert new_q.unit == u.m
assert new_q.value == 15000.1
def test_subtraction(self):
# Take units from left object, q1
new_quantity = self.q1 - self.q2
assert new_quantity.value == 11.34
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 - self.q1
assert new_quantity.value == -1134.0
assert new_quantity.unit == u.centimeter
def test_multiplication(self):
# Take units from left object, q1
new_quantity = self.q1 * self.q2
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.meter * u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 * self.q1
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.centimeter * u.meter)
# Multiply with a number
new_quantity = 15. * self.q1
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiply with a number
new_quantity = self.q1 * 15.
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
def test_division(self):
# Take units from left object, q1
new_quantity = self.q1 / self.q2
assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
assert new_quantity.unit == (u.meter / u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 / self.q1
assert_array_almost_equal(new_quantity.value, 0.70052539404553416,
decimal=16)
assert new_quantity.unit == (u.centimeter / u.meter)
q1 = u.Quantity(11.4, unit=u.meter)
q2 = u.Quantity(10.0, unit=u.second)
new_quantity = q1 / q2
assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
assert new_quantity.unit == (u.meter / u.second)
# divide with a number
new_quantity = self.q1 / 10.
assert new_quantity.value == 1.142
assert new_quantity.unit == u.meter
# divide with a number
new_quantity = 11.42 / self.q1
assert new_quantity.value == 1.
assert new_quantity.unit == u.Unit("1/m")
def test_commutativity(self):
"""Regression test for issue #587."""
new_q = u.Quantity(11.42, 'm*s')
assert self.q1 * u.s == u.s * self.q1 == new_q
assert self.q1 / u.s == u.Quantity(11.42, 'm/s')
assert u.s / self.q1 == u.Quantity(1 / 11.42, 's/m')
def test_power(self):
# raise quantity to a power
new_quantity = self.q1 ** 2
assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
assert new_quantity.unit == u.Unit("m^2")
new_quantity = self.q1 ** 3
assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
assert new_quantity.unit == u.Unit("m^3")
def test_matrix_multiplication(self):
a = np.eye(3)
q = a * u.m
result1 = q @ a
assert np.all(result1 == q)
result2 = a @ q
assert np.all(result2 == q)
result3 = q @ q
assert np.all(result3 == a * u.m ** 2)
# less trivial case.
q2 = np.array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]) / u.s
result4 = q @ q2
assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)
def test_unary(self):
# Test the minus unary operator
new_quantity = -self.q1
assert new_quantity.value == -self.q1.value
assert new_quantity.unit == self.q1.unit
new_quantity = -(-self.q1)
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
# Test the plus unary operator
new_quantity = +self.q1
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
def test_abs(self):
q = 1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == q.value
assert new_quantity.unit == q.unit
q = -1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == -q.value
assert new_quantity.unit == q.unit
def test_incompatible_units(self):
""" When trying to add or subtract units that aren't compatible, throw an error """
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, unit=u.second)
with pytest.raises(u.UnitsError):
q1 + q2
def test_non_number_type(self):
q1 = u.Quantity(11.412, unit=u.meter)
with pytest.raises(TypeError) as exc:
q1 + {'a': 1}
assert exc.value.args[0].startswith(
"Unsupported operand type(s) for ufunc add:")
with pytest.raises(TypeError):
q1 + u.meter
def test_dimensionless_operations(self):
# test conversion to dimensionless
dq = 3. * u.m / u.km
dq1 = dq + 1. * u.mm / u.km
assert dq1.value == 3.001
assert dq1.unit == dq.unit
dq2 = dq + 1.
assert dq2.value == 1.003
assert dq2.unit == u.dimensionless_unscaled
        # check that operations mixing a dimensioned Quantity with a
        # dimensionless one fail
with pytest.raises(u.UnitsError):
self.q1 + u.Quantity(0.1, unit=u.Unit(""))
with pytest.raises(u.UnitsError):
self.q1 - u.Quantity(0.1, unit=u.Unit(""))
# and test that scaling of integers works
q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
q2 = q + np.array([4, 5, 6])
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
# but not if doing it inplace
with pytest.raises(TypeError):
q += np.array([1, 2, 3])
# except if it is actually possible
q = np.array([1, 2, 3]) * u.km / u.m
q += np.array([4, 5, 6])
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == np.array([1004, 2005, 3006]))
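        # (km/m scales by 1000, which keeps integer values integral, so the
        # in-place addition can preserve the int dtype; m/km would scale by
        # 0.001, which cannot be cast back to int.)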
def test_complicated_operation(self):
""" Perform a more complicated test """
from astropy.units import imperial
# Multiple units
distance = u.Quantity(15., u.meter)
time = u.Quantity(11., u.second)
velocity = (distance / time).to(imperial.mile / u.hour)
assert_array_almost_equal(
velocity.value, 3.05037, decimal=5)
G = u.Quantity(6.673E-11, u.m ** 3 / u.kg / u.s ** 2)
_ = ((1. / (4. * np.pi * G)).to(u.pc ** -3 / u.s ** -2 * u.kg))
# Area
side1 = u.Quantity(11., u.centimeter)
side2 = u.Quantity(7., u.centimeter)
area = side1 * side2
assert_array_almost_equal(area.value, 77., decimal=15)
assert area.unit == u.cm * u.cm
def test_comparison(self):
# equality/ non-equality is straightforward for quantity objects
assert (1 / (u.cm * u.cm)) == 1 * u.cm ** -2
assert 1 * u.m == 100 * u.cm
assert 1 * u.m != 1 * u.cm
        # when one side is a unit, Quantity.__eq__ returns NotImplemented,
        # but the unit handles the reflected comparison, so it still works
unit = u.cm**3
q = 1. * unit
assert q.__eq__(unit) is NotImplemented
assert unit.__eq__(q) is True
assert q == unit
q = 1000. * u.mm**3
assert q == unit
# mismatched types should never work
assert not 1. * u.cm == 1.
assert 1. * u.cm != 1.
# comparison with zero should raise a deprecation warning
for quantity in (1. * u.cm, 1. * u.dimensionless_unscaled):
with pytest.warns(AstropyDeprecationWarning, match='The truth value of '
'a Quantity is ambiguous. In the future this will '
'raise a ValueError.'):
bool(quantity)
def test_numeric_converters(self):
        # float, int, and __index__ should only work for scalar quantities
        # of appropriate type, and only if they are dimensionless.
        # For __index__, the quantity must be unscaled as well.
# (Check on __index__ is also a regression test for #1557)
# quantities with units should never convert, or be usable as an index
q1 = u.Quantity(1, u.m)
converter_err_msg = ("only dimensionless scalar quantities "
"can be converted to Python scalars")
index_err_msg = ("only integer dimensionless scalar quantities "
"can be converted to a Python index")
with pytest.raises(TypeError) as exc:
float(q1)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q1)
assert exc.value.args[0] == converter_err_msg
        # We used to test `q1 * ['a', 'b', 'c']` here, but that this worked
        # at all was a really odd confluence of bugs.  Since it doesn't work
        # in numpy >=1.10 any more, just go directly for `__index__` (which
        # makes the test more similar to the `int` etc. tests).
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless but scaled is OK, however
q2 = u.Quantity(1.23, u.m / u.km)
assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))
assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))
with pytest.raises(TypeError) as exc:
q2.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless unscaled is OK, though for index needs to be int
q3 = u.Quantity(1.23, u.dimensionless_unscaled)
assert float(q3) == 1.23
assert int(q3) == 1
with pytest.raises(TypeError) as exc:
q3.__index__()
assert exc.value.args[0] == index_err_msg
# integer dimensionless unscaled is good for all
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert float(q4) == 2.
assert int(q4) == 2
assert q4.__index__() == 2
# but arrays are not OK
q5 = u.Quantity([1, 2], u.m)
with pytest.raises(TypeError) as exc:
float(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
q5.__index__()
assert exc.value.args[0] == index_err_msg
# See https://github.com/numpy/numpy/issues/5074
# It seems unlikely this will be resolved, so xfail'ing it.
@pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10")
def test_numeric_converter_to_index_in_practice(self):
"""Test that use of __index__ actually works."""
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert q4 * ['a', 'b', 'c'] == ['a', 'b', 'c', 'a', 'b', 'c']
def test_array_converters(self):
# Scalar quantity
q = u.Quantity(1.23, u.m)
assert np.all(np.array(q) == np.array([1.23]))
# Array quantity
q = u.Quantity([1., 2., 3.], u.m)
assert np.all(np.array(q) == np.array([1., 2., 3.]))
def test_quantity_conversion():
q1 = u.Quantity(0.1, unit=u.meter)
value = q1.value
assert value == 0.1
value_in_km = q1.to_value(u.kilometer)
assert value_in_km == 0.0001
new_quantity = q1.to(u.kilometer)
assert new_quantity.value == 0.0001
with pytest.raises(u.UnitsError):
q1.to(u.zettastokes)
with pytest.raises(u.UnitsError):
q1.to_value(u.zettastokes)
def test_quantity_value_views():
q1 = u.Quantity([1., 2.], unit=u.meter)
# views if the unit is the same.
v1 = q1.value
v1[0] = 0.
assert np.all(q1 == [0., 2.] * u.meter)
v2 = q1.to_value()
v2[1] = 3.
assert np.all(q1 == [0., 3.] * u.meter)
v3 = q1.to_value('m')
v3[0] = 1.
assert np.all(q1 == [1., 3.] * u.meter)
q2 = q1.to('m', copy=False)
q2[0] = 2 * u.meter
assert np.all(q1 == [2., 3.] * u.meter)
v4 = q1.to_value('cm')
v4[0] = 0.
# copy if different unit.
assert np.all(q1 == [2., 3.] * u.meter)
def test_quantity_conversion_with_equiv():
q1 = u.Quantity(0.1, unit=u.meter)
v2 = q1.to_value(u.Hz, equivalencies=u.spectral())
assert_allclose(v2, 2997924580.0)
q2 = q1.to(u.Hz, equivalencies=u.spectral())
assert_allclose(q2.value, v2)
q1 = u.Quantity(0.4, unit=u.arcsecond)
v2 = q1.to_value(u.au, equivalencies=u.parallax())
q2 = q1.to(u.au, equivalencies=u.parallax())
v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())
q3 = q2.to(u.arcminute, equivalencies=u.parallax())
assert_allclose(v2, 515662.015)
assert_allclose(q2.value, v2)
assert q2.unit == u.au
assert_allclose(v3, 0.0066666667)
assert_allclose(q3.value, v3)
assert q3.unit == u.arcminute
def test_quantity_conversion_equivalency_passed_on():
class MySpectral(u.Quantity):
_equivalencies = u.spectral()
def __quantity_view__(self, obj, unit):
return obj.view(MySpectral)
def __quantity_instance__(self, *args, **kwargs):
return MySpectral(*args, **kwargs)
q1 = MySpectral([1000, 2000], unit=u.Hz)
q2 = q1.to(u.nm)
assert q2.unit == u.nm
q3 = q2.to(u.Hz)
assert q3.unit == u.Hz
assert_allclose(q3.value, q1.value)
q4 = MySpectral([1000, 2000], unit=u.nm)
q5 = q4.to(u.Hz).to(u.nm)
assert q5.unit == u.nm
assert_allclose(q4.value, q5.value)
# Regression test for issue #2315, divide-by-zero error when examining 0*unit
def test_self_equivalency():
assert u.deg.is_equivalent(0*u.radian)
assert u.deg.is_equivalent(1*u.radian)
def test_si():
q1 = 10. * u.m * u.s ** 2 / (200. * u.ms) ** 2 # 250 meters
assert q1.si.value == 250
assert q1.si.unit == u.m
q = 10. * u.m # 10 meters
assert q.si.value == 10
assert q.si.unit == u.m
q = 10. / u.m # 10 1 / meters
assert q.si.value == 10
assert q.si.unit == (1 / u.m)
def test_cgs():
q1 = 10. * u.cm * u.s ** 2 / (200. * u.ms) ** 2 # 250 centimeters
assert q1.cgs.value == 250
assert q1.cgs.unit == u.cm
    q = 10. * u.m  # 10 meters == 1000 centimeters
assert q.cgs.value == 1000
assert q.cgs.unit == u.cm
q = 10. / u.cm # 10 1 / centimeters
assert q.cgs.value == 10
assert q.cgs.unit == (1 / u.cm)
q = 10. * u.Pa # 10 pascals
assert q.cgs.value == 100
assert q.cgs.unit == u.barye
class TestQuantityComparison:
def test_quantity_equality(self):
assert u.Quantity(1000, unit='m') == u.Quantity(1, unit='km')
assert not (u.Quantity(1, unit='m') == u.Quantity(1, unit='km'))
        # ==/!= return False/True, respectively, if units do not match
assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True
assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False
assert (u.Quantity(0, unit=u.m) == u.Quantity(0, unit=u.s)) is False
        # But allow comparison with 0 and +/-inf if the latter are unitless
assert u.Quantity(0, u.m) == 0.
assert u.Quantity(1, u.m) != 0.
assert u.Quantity(1, u.m) != np.inf
assert u.Quantity(np.inf, u.m) == np.inf
def test_quantity_equality_array(self):
a = u.Quantity([0., 1., 1000.], u.m)
b = u.Quantity(1., u.km)
eq = a == b
ne = a != b
assert np.all(eq == [False, False, True])
assert np.all(eq != ne)
        # For mismatched units, == is simply False and != is True
c = u.Quantity(1., u.s)
eq = a == c
ne = a != c
assert eq is False
assert ne is True
# Constants are treated as dimensionless, so False too.
eq = a == 1.
ne = a != 1.
assert eq is False
assert ne is True
# But 0 can have any units, so we can compare.
eq = a == 0
ne = a != 0
assert np.all(eq == [True, False, False])
assert np.all(eq != ne)
# But we do not extend that to arrays; they should have the same unit.
d = np.array([0, 1., 1000.])
eq = a == d
ne = a != d
assert eq is False
assert ne is True
def test_quantity_comparison(self):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(
1100, unit=u.meter) >= u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)
assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)
class TestQuantityDisplay:
scalarintq = u.Quantity(1, unit='m', dtype=int)
scalarfloatq = u.Quantity(1.3, unit='m')
arrq = u.Quantity([1, 2.3, 8.9], unit='m')
scalar_complex_q = u.Quantity(complex(1.0, 2.0))
scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)
scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)
arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))
big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))
def test_dimensionless_quantity_repr(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
assert repr(self.scalarintq * q2) == "<Quantity 1.>"
assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>"
assert repr(self.scalarintq * q3) == "<Quantity 1>"
def test_dimensionless_quantity_str(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
assert str(self.scalarintq * q2) == "1.0"
assert str(self.scalarintq * q3) == "1"
assert str(self.arrq * q2) == "[1. 2.3 8.9]"
def test_dimensionless_quantity_format(self):
q1 = u.Quantity(3.14)
assert format(q1, '.2f') == '3.14'
def test_scalar_quantity_str(self):
assert str(self.scalarintq) == "1 m"
assert str(self.scalarfloatq) == "1.3 m"
def test_scalar_quantity_repr(self):
assert repr(self.scalarintq) == "<Quantity 1 m>"
assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
def test_array_quantity_str(self):
assert str(self.arrq) == "[1. 2.3 8.9] m"
def test_array_quantity_repr(self):
assert repr(self.arrq) == "<Quantity [1. , 2.3, 8.9] m>"
def test_scalar_quantity_format(self):
assert format(self.scalarintq, '02d') == "01 m"
assert format(self.scalarfloatq, '.1f') == "1.3 m"
assert format(self.scalarfloatq, '.0f') == "1 m"
def test_uninitialized_unit_format(self):
bad_quantity = np.arange(10.).view(u.Quantity)
assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + '>')
def test_to_string(self):
qscalar = u.Quantity(1.5e14, 'm/s')
# __str__ is the default `format`
assert str(qscalar) == qscalar.to_string()
res = 'Quantity as KMS: 150000000000.0 km / s'
assert f"Quantity as KMS: {qscalar.to_string(unit=u.km / u.s)}" == res
# With precision set
res = 'Quantity as KMS: 1.500e+11 km / s'
assert f"Quantity as KMS: {qscalar.to_string(precision=3, unit=u.km / u.s)}" == res
res = r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$'
assert qscalar.to_string(format="latex") == res
assert qscalar.to_string(format="latex", subfmt="inline") == res
res = r'$\displaystyle 1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$'
assert qscalar.to_string(format="latex", subfmt="display") == res
res = r'$1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$'
assert qscalar.to_string(format="latex_inline") == res
assert qscalar.to_string(format="latex_inline", subfmt="inline") == res
res = r'$\displaystyle 1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$'
assert qscalar.to_string(format="latex_inline", subfmt="display") == res
res = '[0 1 2] (Unit not initialised)'
assert np.arange(3).view(u.Quantity).to_string() == res
def test_repr_latex(self):
from astropy.units.quantity import conf
q2scalar = u.Quantity(1.5e14, 'm/s')
assert self.scalarintq._repr_latex_() == r'$1 \; \mathrm{m}$'
assert self.scalarfloatq._repr_latex_() == r'$1.3 \; \mathrm{m}$'
assert (q2scalar._repr_latex_() ==
r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$')
assert self.arrq._repr_latex_() == r'$[1,~2.3,~8.9] \; \mathrm{m}$'
# Complex quantities
assert self.scalar_complex_q._repr_latex_() == r'$(1+2i) \; \mathrm{}$'
assert (self.scalar_big_complex_q._repr_latex_() ==
r'$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$')
assert (self.scalar_big_neg_complex_q._repr_latex_() ==
r'$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$')
assert (self.arr_complex_q._repr_latex_() ==
(r'$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),'
r'~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$'))
assert r'\dots' in self.big_arr_complex_q._repr_latex_()
qmed = np.arange(100)*u.m
qbig = np.arange(1000)*u.m
qvbig = np.arange(10000)*1e9*u.m
pops = np.get_printoptions()
oldlat = conf.latex_array_threshold
try:
# check precision behavior
q = u.Quantity(987654321.123456789, 'm/s')
qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
np.set_printoptions(precision=8)
assert q._repr_latex_() == r'$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$'
assert qa._repr_latex_() == r'$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$'
np.set_printoptions(precision=2)
assert q._repr_latex_() == r'$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$'
assert qa._repr_latex_() == r'$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$'
# check thresholding behavior
conf.latex_array_threshold = 100 # should be default
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = 1001
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' not in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = -1 # means use the numpy threshold
np.set_printoptions(threshold=99)
lsmed = qmed._repr_latex_()
assert r'\dots' in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
assert lsvbig.endswith(',~1 \\times 10^{13}] \\; \\mathrm{m}$')
finally:
# prevent side-effects from influencing other tests
np.set_printoptions(**pops)
conf.latex_array_threshold = oldlat
qinfnan = [np.inf, -np.inf, np.nan] * u.m
assert qinfnan._repr_latex_() == r'$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$'
def test_decompose():
q1 = 5 * u.N
assert q1.decompose() == (5 * u.kg * u.m * u.s ** -2)
def test_decompose_regression():
"""
Regression test for bug #1163
If decompose was called multiple times on a Quantity with an array and a
scale != 1, the result changed every time. This is because the value was
being referenced not copied, then modified, which changed the original
value.
"""
q = np.array([1, 2, 3]) * u.m / (2. * u.km)
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
assert np.all(q == np.array([1, 2, 3]) * u.m / (2. * u.km))
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
def test_arrays():
"""
    Test using quantities with array values
"""
qsec = u.Quantity(np.arange(10), u.second)
assert isinstance(qsec.value, np.ndarray)
assert not qsec.isscalar
# len and indexing should work for arrays
assert len(qsec) == len(qsec.value)
qsecsub25 = qsec[2:5]
assert qsecsub25.unit == qsec.unit
assert isinstance(qsecsub25, u.Quantity)
assert len(qsecsub25) == 3
# make sure isscalar, len, and indexing behave correctly for non-arrays.
qsecnotarray = u.Quantity(10., u.second)
assert qsecnotarray.isscalar
with pytest.raises(TypeError):
len(qsecnotarray)
with pytest.raises(TypeError):
qsecnotarray[0]
qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
# 0d numpy array should act basically like a scalar
assert qseclen0array.isscalar
with pytest.raises(TypeError):
len(qseclen0array)
with pytest.raises(TypeError):
qseclen0array[0]
assert isinstance(qseclen0array.value, numbers.Integral)
a = np.array([(1., 2., 3.), (4., 5., 6.), (7., 8., 9.)],
dtype=[('x', float),
('y', float),
('z', float)])
qkpc = u.Quantity(a, u.kpc)
assert not qkpc.isscalar
qkpc0 = qkpc[0]
assert qkpc0.value == a[0]
assert qkpc0.unit == qkpc.unit
assert isinstance(qkpc0, u.Quantity)
assert qkpc0.isscalar
qkpcx = qkpc['x']
assert np.all(qkpcx.value == a['x'])
assert qkpcx.unit == qkpc.unit
assert isinstance(qkpcx, u.Quantity)
assert not qkpcx.isscalar
qkpcx1 = qkpc['x'][1]
assert qkpcx1.unit == qkpc.unit
assert isinstance(qkpcx1, u.Quantity)
assert qkpcx1.isscalar
qkpc1x = qkpc[1]['x']
assert qkpc1x.isscalar
assert qkpc1x == qkpcx1
# can also create from lists, will auto-convert to arrays
qsec = u.Quantity(list(range(10)), u.second)
assert isinstance(qsec.value, np.ndarray)
# quantity math should work with arrays
assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
    # quantity addition/subtraction with plain arrays should *not* work,
    # because the unit of the bare numbers is ambiguous
with pytest.raises(u.UnitsError):
assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
with pytest.raises(u.UnitsError):
assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
# should create by unit multiplication, too
qsec2 = np.arange(10) * u.second
qsec3 = u.second * np.arange(10)
assert np.all(qsec == qsec2)
assert np.all(qsec2 == qsec3)
# make sure numerical-converters fail when arrays are present
with pytest.raises(TypeError):
float(qsec)
with pytest.raises(TypeError):
int(qsec)
def test_array_indexing_slicing():
q = np.array([1., 2., 3.]) * u.m
assert q[0] == 1. * u.m
assert np.all(q[0:2] == u.Quantity([1., 2.], u.m))
def test_array_setslice():
q = np.array([1., 2., 3.]) * u.m
q[1:2] = np.array([400.]) * u.cm
assert np.all(q == np.array([1., 4., 3.]) * u.m)
def test_inverse_quantity():
"""
Regression test from issue #679
"""
q = u.Quantity(4., u.meter / u.second)
qot = q / 2
toq = 2 / q
npqot = q / np.array(2)
assert npqot.value == 2.0
assert npqot.unit == (u.meter / u.second)
assert qot.value == 2.0
assert qot.unit == (u.meter / u.second)
assert toq.value == 0.5
assert toq.unit == (u.second / u.meter)
def test_quantity_mutability():
q = u.Quantity(9.8, u.meter / u.second / u.second)
with pytest.raises(AttributeError):
q.value = 3
with pytest.raises(AttributeError):
q.unit = u.kg
def test_quantity_initialized_with_quantity():
q1 = u.Quantity(60, u.second)
q2 = u.Quantity(q1, u.minute)
assert q2.value == 1
q3 = u.Quantity([q1, q2], u.second)
assert q3[0].value == 60
assert q3[1].value == 60
q4 = u.Quantity([q2, q1])
assert q4.unit == q2.unit
assert q4[0].value == 1
assert q4[1].value == 1
def test_quantity_string_unit():
q1 = 1. * u.m / 's'
assert q1.value == 1
assert q1.unit == (u.m / u.s)
q2 = q1 * "m"
assert q2.unit == ((u.m * u.m) / u.s)
def test_quantity_invalid_unit_string():
with pytest.raises(ValueError):
"foo" * u.m
def test_implicit_conversion():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
assert_allclose(q.centimeter, 100)
assert_allclose(q.cm, 100)
assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
q.foo = 42
attrs = dir(q)
assert 'centimeter' in attrs
assert 'cm' in attrs
assert 'parsec' in attrs
assert 'foo' in attrs
assert 'to' in attrs
assert 'value' in attrs
# Something from the base class, object
assert '__setattr__' in attrs
with pytest.raises(AttributeError):
q.l
def test_quantity_iterability():
"""Regressiont est for issue #878.
Scalar quantities should not be iterable and should raise a type error on
iteration.
"""
q1 = [15.0, 17.0] * u.m
assert isiterable(q1)
q2 = next(iter(q1))
assert q2 == 15.0 * u.m
assert not isiterable(q2)
pytest.raises(TypeError, iter, q2)
def test_copy():
q1 = u.Quantity(np.array([[1., 2., 3.], [4., 5., 6.]]), unit=u.m)
q2 = q1.copy()
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
q3 = q1.copy(order='F')
assert q3.flags['F_CONTIGUOUS']
assert np.all(q1.value == q3.value)
assert q1.unit == q3.unit
assert q1.dtype == q3.dtype
assert q1.value is not q3.value
q4 = q1.copy(order='C')
assert q4.flags['C_CONTIGUOUS']
assert np.all(q1.value == q4.value)
assert q1.unit == q4.unit
assert q1.dtype == q4.dtype
assert q1.value is not q4.value
def test_deepcopy():
q1 = u.Quantity(np.array([1., 2., 3.]), unit=u.m)
q2 = copy.deepcopy(q1)
assert isinstance(q2, u.Quantity)
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
def test_equality_numpy_scalar():
"""
A regression test to ensure that numpy scalars are correctly compared
(which originally failed due to the lack of ``__array_priority__``).
"""
assert 10 != 10. * u.m
assert np.int64(10) != 10 * u.m
assert 10 * u.m != np.int64(10)
def test_quantity_pickleability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
def test_quantity_initialisation_from_string():
q = u.Quantity('1')
assert q.unit == u.dimensionless_unscaled
assert q.value == 1.
q = u.Quantity('1.5 m/s')
assert q.unit == u.m/u.s
assert q.value == 1.5
assert u.Unit(q) == u.Unit('1.5 m/s')
q = u.Quantity('.5 m')
assert q == u.Quantity(0.5, u.m)
q = u.Quantity('-1e1km')
assert q == u.Quantity(-10, u.km)
q = u.Quantity('-1e+1km')
assert q == u.Quantity(-10, u.km)
q = u.Quantity('+.5km')
assert q == u.Quantity(.5, u.km)
q = u.Quantity('+5e-1km')
assert q == u.Quantity(.5, u.km)
q = u.Quantity('5', u.m)
assert q == u.Quantity(5., u.m)
q = u.Quantity('5 km', u.m)
assert q.value == 5000.
assert q.unit == u.m
q = u.Quantity('5Em')
assert q == u.Quantity(5., u.Em)
with pytest.raises(TypeError):
u.Quantity('')
with pytest.raises(TypeError):
u.Quantity('m')
with pytest.raises(TypeError):
u.Quantity('1.2.3 deg')
with pytest.raises(TypeError):
u.Quantity('1+deg')
with pytest.raises(TypeError):
u.Quantity('1-2deg')
with pytest.raises(TypeError):
u.Quantity('1.2e-13.3m')
with pytest.raises(TypeError):
u.Quantity(['5'])
with pytest.raises(TypeError):
u.Quantity(np.array(['5']))
with pytest.raises(ValueError):
u.Quantity('5E')
with pytest.raises(ValueError):
u.Quantity('5 foo')
def test_unsupported():
q1 = np.arange(10) * u.m
with pytest.raises(TypeError):
np.bitwise_and(q1, q1)
def test_unit_identity():
q = 1.0 * u.hour
assert q.unit is u.hour
def test_quantity_to_view():
q1 = np.array([1000, 2000]) * u.m
q2 = q1.to(u.km)
assert q1.value[0] == 1000
assert q2.value[0] == 1
def test_quantity_tuple_power():
with pytest.raises(ValueError):
(5.0 * u.m) ** (1, 2)
def test_quantity_fraction_power():
q = (25.0 * u.m**2) ** Fraction(1, 2)
assert q.value == 5.
assert q.unit == u.m
    # Regression check to ensure we didn't end up with an object dtype by
    # raising the quantity to a Fraction power. [#3922]
assert q.dtype.kind == 'f'
def test_quantity_from_table():
"""
Checks that units from tables are respected when converted to a Quantity.
This also generically checks the use of *anything* with a `unit` attribute
    passed into Quantity.
"""
from astropy.table import Table
t = Table(data=[np.arange(5), np.arange(5)], names=['a', 'b'])
t['a'].unit = u.kpc
qa = u.Quantity(t['a'])
assert qa.unit == u.kpc
assert_array_equal(qa.value, t['a'])
qb = u.Quantity(t['b'])
assert qb.unit == u.dimensionless_unscaled
assert_array_equal(qb.value, t['b'])
# This does *not* auto-convert, because it's not necessarily obvious that's
# desired. Instead we revert to standard `Quantity` behavior
qap = u.Quantity(t['a'], u.pc)
assert qap.unit == u.pc
assert_array_equal(qap.value, t['a'] * 1000)
qbp = u.Quantity(t['b'], u.pc)
assert qbp.unit == u.pc
assert_array_equal(qbp.value, t['b'])
# Also check with a function unit (regression test for gh-8430)
t['a'].unit = u.dex(u.cm/u.s**2)
fq = u.Dex(t['a'])
assert fq.unit == u.dex(u.cm/u.s**2)
assert_array_equal(fq.value, t['a'])
fq2 = u.Quantity(t['a'], subok=True)
assert isinstance(fq2, u.Dex)
assert fq2.unit == u.dex(u.cm/u.s**2)
assert_array_equal(fq2.value, t['a'])
with pytest.raises(u.UnitTypeError):
u.Quantity(t['a'])
def test_assign_slice_with_quantity_like():
# Regression tests for gh-5961
from astropy.table import Column, Table
# first check directly that we can use a Column to assign to a slice.
c = Column(np.arange(10.), unit=u.mm)
q = u.Quantity(c)
q[:2] = c[:2]
# next check that we do not fail the original problem.
t = Table()
t['x'] = np.arange(10) * u.mm
t['y'] = np.ones(10) * u.mm
assert type(t['x']) is Column
xy = np.vstack([t['x'], t['y']]).T * u.mm
ii = [0, 2, 4]
assert xy[ii, 0].unit == t['x'][ii].unit
# should not raise anything
xy[ii, 0] = t['x'][ii]
def test_insert():
"""
Test Quantity.insert method. This does not test the full capabilities
of the underlying np.insert, but hits the key functionality for
Quantity.
"""
q = [1, 2] * u.m
# Insert a compatible float with different units
q2 = q.insert(0, 1 * u.km)
assert np.all(q2.value == [1000, 1, 2])
assert q2.unit is u.m
assert q2.dtype.kind == 'f'
if minversion(np, '1.8.0'):
q2 = q.insert(1, [1, 2] * u.km)
assert np.all(q2.value == [1, 1000, 2000, 2])
assert q2.unit is u.m
# Cannot convert 1.5 * u.s to m
with pytest.raises(u.UnitsError):
q.insert(1, 1.5 * u.s)
# Tests with multi-dim quantity
q = [[1, 2], [3, 4]] * u.m
q2 = q.insert(1, [10, 20] * u.m, axis=0)
assert np.all(q2.value == [[1, 2],
[10, 20],
[3, 4]])
q2 = q.insert(1, [10, 20] * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2],
[3, 20, 4]])
q2 = q.insert(1, 10 * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2],
[3, 10, 4]])
def test_repr_array_of_quantity():
"""
Test print/repr of object arrays of Quantity objects with different
units.
Regression test for the issue first reported in
https://github.com/astropy/astropy/issues/3777
"""
a = np.array([1 * u.m, 2 * u.s], dtype=object)
assert repr(a) == 'array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)'
assert str(a) == '[<Quantity 1. m> <Quantity 2. s>]'
class TestSpecificTypeQuantity:
def setup(self):
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
class Length2(Length):
_default_unit = u.m
class Length3(Length):
_unit = u.m
self.Length = Length
self.Length2 = Length2
self.Length3 = Length3
def test_creation(self):
l = self.Length(np.arange(10.)*u.km)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.) * u.hour)
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.))
l2 = self.Length2(np.arange(5.))
assert type(l2) is self.Length2
assert l2._default_unit is self.Length2._default_unit
with pytest.raises(u.UnitTypeError):
self.Length3(np.arange(10.))
def test_view(self):
l = (np.arange(5.) * u.km).view(self.Length)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
(np.arange(5.) * u.s).view(self.Length)
v = np.arange(5.).view(self.Length)
assert type(v) is self.Length
assert v._unit is None
l3 = np.ones((2, 2)).view(self.Length3)
assert type(l3) is self.Length3
assert l3.unit is self.Length3._unit
def test_operation_precedence_and_fallback(self):
l = self.Length(np.arange(5.)*u.cm)
sum1 = l + 1.*u.m
assert type(sum1) is self.Length
sum2 = 1.*u.km + l
assert type(sum2) is self.Length
sum3 = l + l
assert type(sum3) is self.Length
res1 = l * (1.*u.m)
assert type(res1) is u.Quantity
res2 = l * l
assert type(res2) is u.Quantity
def test_unit_class_override():
class MyQuantity(u.Quantity):
pass
my_unit = u.Unit("my_deg", u.deg)
my_unit._quantity_class = MyQuantity
q1 = u.Quantity(1., my_unit)
assert type(q1) is u.Quantity
q2 = u.Quantity(1., my_unit, subok=True)
assert type(q2) is MyQuantity
class QuantityMimic:
def __init__(self, value, unit):
self.value = value
self.unit = unit
def __array__(self):
return np.array(self.value)
class QuantityMimic2(QuantityMimic):
def to(self, unit):
return u.Quantity(self.value, self.unit).to(unit)
def to_value(self, unit):
return u.Quantity(self.value, self.unit).to_value(unit)
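# QuantityMimic exposes only the minimal duck-type interface (``value``,
# ``unit`` and ``__array__``); QuantityMimic2 adds ``to``/``to_value``,
# presumably letting Quantity delegate unit conversion to the mimic itself
# instead of converting the raw array.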
class TestQuantityMimics:
"""Test Quantity Mimics that are not ndarray subclasses."""
@pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2))
def test_mimic_input(self, Mimic):
value = np.arange(10.)
mimic = Mimic(value, u.m)
q = u.Quantity(mimic)
assert q.unit == u.m
assert np.all(q.value == value)
q2 = u.Quantity(mimic, u.cm)
assert q2.unit == u.cm
assert np.all(q2.value == 100 * value)
@pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2))
def test_mimic_setting(self, Mimic):
mimic = Mimic([1., 2.], u.m)
q = u.Quantity(np.arange(10.), u.cm)
q[8:] = mimic
assert np.all(q[:8].value == np.arange(8.))
assert np.all(q[8:].value == [100., 200.])
def test_mimic_function_unit(self):
mimic = QuantityMimic([1., 2.], u.dex(u.cm/u.s**2))
d = u.Dex(mimic)
assert isinstance(d, u.Dex)
assert d.unit == u.dex(u.cm/u.s**2)
assert np.all(d.value == [1., 2.])
q = u.Quantity(mimic, subok=True)
assert isinstance(q, u.Dex)
assert q.unit == u.dex(u.cm/u.s**2)
assert np.all(q.value == [1., 2.])
with pytest.raises(u.UnitTypeError):
u.Quantity(mimic)
def test_masked_quantity_str_repr():
"""Ensure we don't break masked Quantity representation."""
# Really, masked quantities do not work well, but at least let the
# basics work.
masked_quantity = np.ma.array([1, 2, 3, 4] * u.kg,
mask=[True, False, True, False])
str(masked_quantity)
repr(masked_quantity)
class TestQuantitySubclassAboveAndBelow:
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __array_finalize__(self, obj):
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
if hasattr(obj, 'my_attr'):
self.my_attr = obj.my_attr
self.MyArray = MyArray
self.MyQuantity1 = type('MyQuantity1', (u.Quantity, MyArray),
dict(my_attr='1'))
self.MyQuantity2 = type('MyQuantity2', (MyArray, u.Quantity),
dict(my_attr='2'))
def test_setup(self):
mq1 = self.MyQuantity1(10, u.m)
assert isinstance(mq1, self.MyQuantity1)
assert mq1.my_attr == '1'
assert mq1.unit is u.m
mq2 = self.MyQuantity2(10, u.m)
assert isinstance(mq2, self.MyQuantity2)
assert mq2.my_attr == '2'
assert mq2.unit is u.m
def test_attr_propagation(self):
mq1 = self.MyQuantity1(10, u.m)
mq12 = self.MyQuantity2(mq1)
assert isinstance(mq12, self.MyQuantity2)
assert not isinstance(mq12, self.MyQuantity1)
assert mq12.my_attr == '1'
assert mq12.unit is u.m
mq2 = self.MyQuantity2(10, u.m)
mq21 = self.MyQuantity1(mq2)
assert isinstance(mq21, self.MyQuantity1)
assert not isinstance(mq21, self.MyQuantity2)
assert mq21.my_attr == '2'
assert mq21.unit is u.m
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities specifically with the ERFA ufuncs.
"""
import erfa
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.introspection import minversion
ERFA_LE_2_0_0 = not minversion(erfa, '2.0.0.1')
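# i.e., True if the installed erfa is 2.0.0 or older (predates 2.0.0.1).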
class TestPVUfuncs:
def setup_class(self):
self.pv_unit = u.Unit('AU,AU/day')
self.pv_value = np.array([([1., 0., 0.], [0., 0.0125, 0.]),
([0., 1., 0.], [-.0125, 0., 0.])],
dtype=erfa_ufunc.dt_pv)
self.pv = self.pv_value << self.pv_unit
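        # erfa_ufunc.dt_pv is a structured dtype with two 3-vector fields,
        # 'p' (position) and 'v' (velocity); the StructuredUnit 'AU,AU/day'
        # attaches one unit per field, so self.pv['p'] is in AU and
        # self.pv['v'] in AU/day.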
def test_cpv(self):
pv_copy = erfa_ufunc.cpv(self.pv)
assert_array_equal(pv_copy, self.pv)
assert not np.may_share_memory(pv_copy, self.pv)
def test_p2pv(self):
p2pv = erfa_ufunc.p2pv(self.pv['p'])
assert_array_equal(p2pv['p'], self.pv['p'])
assert_array_equal(p2pv['v'], np.zeros(self.pv.shape+(3,), float) << u.m/u.s)
@pytest.mark.xfail(erfa.__version__ <= '2.0.0',
                       reason='erfa bug; https://github.com/liberfa/pyerfa/issues/70')
def test_p2pv_inplace(self):
# TODO: fix np.zeros_like.
out = np.zeros_like(self.pv_value) << self.pv_unit
p2pv = erfa_ufunc.p2pv(self.pv['p'], out=out)
assert out is p2pv
assert_array_equal(p2pv['p'], self.pv['p'])
assert_array_equal(p2pv['v'], np.zeros(self.pv.shape+(3,), float) << u.m/u.s)
def test_pv2p(self):
p = erfa_ufunc.pv2p(self.pv)
assert_array_equal(p, self.pv['p'])
out = np.zeros_like(p)
p2 = erfa_ufunc.pv2p(self.pv, out=out)
assert out is p2
assert_array_equal(p2, self.pv['p'])
def test_pv2s(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(self.pv.shape)) # latitude
assert r.unit == u.AU
assert_array_equal(r.value, np.ones(self.pv.shape))
assert td.unit == u.radian/u.day
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.day
assert_array_equal(pd.value, np.zeros(self.pv.shape))
assert rd.unit == u.AU/u.day
assert_array_equal(rd.value, np.zeros(self.pv.shape))
def test_pv2s_non_standard_units(self):
pv = self.pv_value << u.Unit('Pa,Pa/m')
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian/u.m
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa/u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
@pytest.mark.xfail(reason=(
'erfa ufuncs cannot take different names; it is not yet clear whether '
'this is changeable; see https://github.com/liberfa/pyerfa/issues/77'))
def test_pv2s_non_standard_names_and_units(self):
pv_value = np.array(self.pv_value, dtype=[('pos', 'f8'), ('vel', 'f8')])
pv = pv_value << u.Unit('Pa,Pa/m')
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian/u.m
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa/u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
def test_s2pv(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
# On purpose change some of the units away from expected by s2pv.
pv = erfa_ufunc.s2pv(theta.to(u.deg), phi, r.to(u.m),
td.to(u.deg/u.day), pd, rd.to(u.m/u.s))
assert pv.unit == u.StructuredUnit('m, m/s', names=('p', 'v'))
assert_quantity_allclose(pv['p'], self.pv['p'], atol=1*u.m, rtol=0)
assert_quantity_allclose(pv['v'], self.pv['v'], atol=1*u.mm/u.s, rtol=0)
def test_pvstar(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
assert_array_equal(stat, np.zeros(self.pv.shape, dtype='i4'))
assert ra.unit == u.radian
assert_quantity_allclose(ra, [0, 90] * u.deg)
assert dec.unit == u.radian
assert_array_equal(dec.value, np.zeros(self.pv.shape)) # latitude
assert pmr.unit == u.radian/u.year
assert_quantity_allclose(pmr, [0.0125, 0.0125]*u.radian/u.day)
assert pmd.unit == u.radian/u.year
assert_array_equal(pmd.value, np.zeros(self.pv.shape))
assert px.unit == u.arcsec
assert_quantity_allclose(px, 1*u.radian)
assert rv.unit == u.km / u.s
assert_array_equal(rv.value, np.zeros(self.pv.shape))
def test_starpv(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
pv, stat = erfa_ufunc.starpv(ra.to(u.deg), dec.to(u.deg), pmr, pmd,
px, rv.to(u.m/u.s))
assert_array_equal(stat, np.zeros(self.pv.shape, dtype='i4'))
assert pv.unit == self.pv.unit
# Roundtrip is not as good as hoped on 32bit, not clear why.
# But proper motions are ridiculously high...
assert_quantity_allclose(pv['p'], self.pv['p'], atol=1*u.m, rtol=0)
assert_quantity_allclose(pv['v'], self.pv['v'], atol=1*u.m/u.s, rtol=0)
def test_pvtob(self):
pv = erfa_ufunc.pvtob([90, 0]*u.deg, 0.*u.deg, 100*u.km,
0*u.deg, 0*u.deg, 0*u.deg, 90*u.deg)
assert pv.unit == u.StructuredUnit('m, m/s', names=('p', 'v'))
assert pv.unit['v'] == u.m / u.s
assert_quantity_allclose(pv['p'], [[-6478, 0, 0], [0, 6478, 0]]*u.km,
atol=2*u.km)
assert_quantity_allclose(pv['v'], [[0, -0.5, 0], [-0.5, 0, 0]]*u.km/u.s,
atol=0.1*u.km/u.s)
def test_pvdpv(self):
pvdpv = erfa_ufunc.pvdpv(self.pv, self.pv)
assert pvdpv['pdp'].unit == self.pv.unit['p'] ** 2
assert pvdpv['pdv'].unit == self.pv.unit['p'] * self.pv.unit['v']
assert_array_equal(pvdpv['pdp'], np.einsum('...i,...i->...',
self.pv['p'], self.pv['p']))
assert_array_equal(pvdpv['pdv'], 2*np.einsum('...i,...i->...',
self.pv['p'], self.pv['v']))
z_axis = u.Quantity(
np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv),
'1,1/s')
pvdpv2 = erfa_ufunc.pvdpv(self.pv, z_axis)
assert pvdpv2['pdp'].unit == self.pv.unit['p']
assert pvdpv2['pdv'].unit == self.pv.unit['v']
assert_array_equal(pvdpv2['pdp'].value, np.zeros(self.pv.shape))
assert_array_equal(pvdpv2['pdv'].value, np.zeros(self.pv.shape))
def test_pvxpv(self):
pvxpv = erfa_ufunc.pvxpv(self.pv, self.pv)
assert pvxpv['p'].unit == self.pv.unit['p'] ** 2
assert pvxpv['v'].unit == self.pv.unit['p'] * self.pv.unit['v']
assert_array_equal(pvxpv['p'].value, np.zeros(self.pv['p'].shape))
assert_array_equal(pvxpv['v'].value, np.zeros(self.pv['v'].shape))
z_axis = u.Quantity(
np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv),
'1,1/s')
pvxpv2 = erfa_ufunc.pvxpv(self.pv, z_axis)
assert pvxpv2['p'].unit == self.pv.unit['p']
assert pvxpv2['v'].unit == self.pv.unit['v']
assert_array_equal(pvxpv2['p'], [[0., -1, 0.],
[1., 0., 0.]] * u.AU)
assert_array_equal(pvxpv2['v'], [[0.0125, 0., 0.],
[0., 0.0125, 0.]] * u.AU / u.day)
def test_pvm(self):
pm, vm = erfa_ufunc.pvm(self.pv)
assert pm.unit == self.pv.unit['p']
assert vm.unit == self.pv.unit['v']
assert_array_equal(pm, np.linalg.norm(self.pv['p'], axis=-1))
assert_array_equal(vm, np.linalg.norm(self.pv['v'], axis=-1))
def test_pvmpv(self):
pvmpv = erfa_ufunc.pvmpv(self.pv, self.pv)
assert pvmpv.unit == self.pv.unit
assert_array_equal(pvmpv['p'], 0*self.pv['p'])
assert_array_equal(pvmpv['v'], 0*self.pv['v'])
def test_pvppv(self):
pvppv = erfa_ufunc.pvppv(self.pv, self.pv)
assert pvppv.unit == self.pv.unit
assert_array_equal(pvppv['p'], 2*self.pv['p'])
assert_array_equal(pvppv['v'], 2*self.pv['v'])
def test_pvu(self):
pvu = erfa_ufunc.pvu(86400*u.s, self.pv)
assert pvu.unit == self.pv.unit
assert_array_equal(pvu['p'], self.pv['p'] + 1*u.day*self.pv['v'])
assert_array_equal(pvu['v'], self.pv['v'])
def test_pvup(self):
pvup = erfa_ufunc.pvup(86400*u.s, self.pv)
assert pvup.unit == self.pv.unit['p']
assert_array_equal(pvup, self.pv['p'] + 1*u.day*self.pv['v'])
def test_sxpv(self):
# Not a realistic example!!
sxpv = erfa_ufunc.sxpv(10., self.pv)
assert sxpv.unit == self.pv.unit
assert_array_equal(sxpv['p'], self.pv['p']*10)
assert_array_equal(sxpv['v'], self.pv['v']*10)
sxpv2 = erfa_ufunc.sxpv(30.*u.s, self.pv)
assert sxpv2.unit == u.StructuredUnit('AU s,AU s/d', names=('p', 'v'))
assert_array_equal(sxpv2['p'], self.pv['p']*30*u.s)
assert_array_equal(sxpv2['v'], self.pv['v']*30*u.s)
def test_s2xpv(self):
# Not a realistic example!!
s2xpv = erfa_ufunc.s2xpv(10., 1*u.s, self.pv)
assert s2xpv.unit == u.StructuredUnit('AU,AU s/d', names=('p', 'v'))
assert_array_equal(s2xpv['p'], self.pv['p']*10)
assert_array_equal(s2xpv['v'], self.pv['v']*u.s)
@pytest.mark.parametrize('r', [
np.eye(3),
np.array([[0., -1., 0.],
[1., 0., 0.],
[0., 0., 1.]]),
np.eye(3) / u.s])
def test_rxpv(self, r):
result = erfa_ufunc.rxpv(r, self.pv)
assert_array_equal(result['p'], np.einsum('...ij,...j->...i',
r, self.pv['p']))
assert_array_equal(result['v'], np.einsum('...ij,...j->...i',
r, self.pv['v']))
@pytest.mark.parametrize('r', [
np.eye(3),
np.array([[0., -1., 0.],
[1., 0., 0.],
[0., 0., 1.]]),
np.eye(3) / u.s])
def test_trxpv(self, r):
result = erfa_ufunc.trxpv(r, self.pv)
assert_array_equal(result['p'], np.einsum('...ij,...j->...i',
r.T, self.pv['p']))
assert_array_equal(result['v'], np.einsum('...ij,...j->...i',
r.T, self.pv['v']))
@pytest.mark.xfail(erfa.__version__ < '1.7.3.1',
reason='dt_eraLDBODY incorrectly defined', scope='class')
class TestEraStructUfuncs:
def setup_class(self):
# From t_ldn in t_erfa_c.c
ldbody = np.array(
[(0.00028574, 3e-10, ([-7.81014427, -5.60956681, -1.98079819],
[0.0030723249, -0.00406995477, -0.00181335842])),
(0.00095435, 3e-9, ([0.738098796, 4.63658692, 1.9693136],
[-0.00755816922, 0.00126913722, 0.000727999001])),
(1.0, 6e-6, ([-0.000712174377, -0.00230478303, -0.00105865966],
[6.29235213e-6, -3.30888387e-7, -2.96486623e-7]))],
dtype=erfa_ufunc.dt_eraLDBODY)
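        # dt_eraLDBODY packs, per body: a mass ('bm'), a deflection limiter
        # ('dl') and a position/velocity pair ('pv'), matching the
        # StructuredUnit 'Msun,radian,(AU,AU/day)' constructed below.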
ldbody_unit = u.StructuredUnit('Msun,radian,(AU,AU/day)', ldbody.dtype)
self.ldbody = ldbody << ldbody_unit
self.ob = [-0.974170437, -0.2115201, -0.0917583114] << u.AU
self.sc = np.array([-0.763276255, -0.608633767, -0.216735543])
# From t_atciq in t_erfa_c.c
astrom, eo = erfa_ufunc.apci13(2456165.5, 0.401182685)
self.astrom_unit = u.StructuredUnit(
'yr,AU,1,AU,1,1,1,rad,rad,rad,rad,1,1,1,rad,rad,rad',
astrom.dtype)
self.astrom = astrom << self.astrom_unit
self.rc = 2.71 * u.rad
self.dc = 0.174 * u.rad
self.pr = 1e-5 * u.rad/u.year
self.pd = 5e-6 * u.rad/u.year
self.px = 0.1 * u.arcsec
self.rv = 55.0 * u.km/u.s
def test_ldn_basic(self):
sn = erfa_ufunc.ldn(self.ldbody, self.ob, self.sc)
assert_quantity_allclose(sn, [-0.7632762579693333866,
-0.6086337636093002660,
-0.2167355420646328159] * u.one,
atol=1e-12, rtol=0)
def test_ldn_in_other_unit(self):
ldbody = self.ldbody.to('kg,rad,(m,m/s)')
ob = self.ob.to('m')
sn = erfa_ufunc.ldn(ldbody, ob, self.sc)
assert_quantity_allclose(sn, [-0.7632762579693333866,
-0.6086337636093002660,
-0.2167355420646328159] * u.one,
atol=1e-12, rtol=0)
def test_ldn_in_SI(self):
sn = erfa_ufunc.ldn(self.ldbody.si, self.ob.si, self.sc)
assert_quantity_allclose(sn, [-0.7632762579693333866,
-0.6086337636093002660,
-0.2167355420646328159] * u.one,
atol=1e-12, rtol=0)
def test_aper(self):
along = self.astrom['along']
astrom2 = erfa_ufunc.aper(10*u.deg, self.astrom)
assert astrom2['eral'].unit == u.radian
assert_quantity_allclose(astrom2['eral'], along+10*u.deg)
astrom3 = self.astrom.to('s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,rad,rad,rad')
astrom4 = erfa_ufunc.aper(10*u.deg, astrom3)
assert astrom3['eral'].unit == u.rad
assert astrom4['eral'].unit == u.deg
assert astrom4.unit == 's,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,rad,rad'
assert_quantity_allclose(astrom4['eral'], along+10*u.deg)
def test_atciq_basic(self):
ri, di = erfa_ufunc.atciq(self.rc, self.dc, self.pr, self.pd,
self.px, self.rv, self.astrom)
assert_quantity_allclose(ri, 2.710121572968696744*u.rad)
assert_quantity_allclose(di, 0.1729371367219539137*u.rad)
def test_atciq_in_other_unit(self):
astrom = self.astrom.to('s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,deg,deg')
ri, di = erfa_ufunc.atciq(self.rc.to(u.deg), self.dc.to(u.deg),
self.pr.to(u.mas/u.yr), self.pd.to(u.mas/u.yr),
self.px, self.rv.to(u.m/u.s), astrom)
assert_quantity_allclose(ri, 2.710121572968696744*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(di, 0.1729371367219539137*u.rad, atol=1e-12*u.rad)
def test_atciqn(self):
ri, di = erfa_ufunc.atciqn(self.rc.to(u.deg), self.dc.to(u.deg),
self.pr.to(u.mas/u.yr), self.pd.to(u.mas/u.yr),
self.px, self.rv.to(u.m/u.s), self.astrom.si,
self.ldbody.si)
assert_quantity_allclose(ri, 2.710122008104983335*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(di, 0.1729371916492767821*u.rad, atol=1e-12*u.rad)
def test_atciqz(self):
ri, di = erfa_ufunc.atciqz(self.rc.to(u.deg), self.dc.to(u.deg),
self.astrom.si)
assert_quantity_allclose(ri, 2.709994899247256984*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(di, 0.1728740720984931891*u.rad, atol=1e-12*u.rad)
def test_aticq(self):
ri = 2.710121572969038991 * u.rad
di = 0.1729371367218230438 * u.rad
rc, dc = erfa_ufunc.aticq(ri.to(u.deg), di.to(u.deg), self.astrom.si)
assert_quantity_allclose(rc, 2.710126504531716819*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(dc, 0.1740632537627034482*u.rad, atol=1e-12*u.rad)
def test_aticqn(self):
ri = 2.709994899247599271 * u.rad
di = 0.1728740720983623469 * u.rad
rc, dc = erfa_ufunc.aticqn(ri.to(u.deg), di.to(u.deg), self.astrom.si,
self.ldbody.si)
assert_quantity_allclose(rc, 2.709999575033027333*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(dc, 0.1739999656316469990*u.rad, atol=1e-12*u.rad)
def test_atioq_atoiq(self):
astrom, _ = erfa_ufunc.apio13(2456384.5, 0.969254051, 0.1550675,
-0.527800806, -1.2345856, 2738.0,
2.47230737e-7, 1.82640464e-6,
731.0, 12.8, 0.59, 0.55)
astrom = astrom << self.astrom_unit
ri = 2.710121572969038991 * u.rad
di = 0.1729371367218230438 * u.rad
aob, zob, hob, dob, rob = erfa_ufunc.atioq(ri.to(u.deg), di.to(u.deg),
astrom.si)
assert_quantity_allclose(aob, 0.9233952224895122499e-1*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(zob, 1.407758704513549991*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(hob, -0.9247619879881698140e-1*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(dob, 0.1717653435756234676*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(rob, 2.710085107988480746*u.rad, atol=1e-12*u.rad)
# Sadly does not just use the values from above.
ob1 = 2.710085107986886201 * u.rad
ob2 = 0.1717653435758265198 * u.rad
ri2, di2 = erfa_ufunc.atoiq("R", ob1.to(u.deg), ob2.to(u.deg), astrom.si)
assert_quantity_allclose(ri2, 2.710121574447540810*u.rad, atol=1e-12*u.rad)
assert_quantity_allclose(di2, 0.17293718391166087785*u.rad, atol=1e-12*u.rad)
@pytest.mark.xfail(erfa.__version__ < '2.0.0', reason='comparisons changed')
def test_apio(self):
sp = -3.01974337e-11 * u.rad
theta = 3.14540971 * u.rad
elong = -0.527800806 * u.rad
phi = -1.2345856 * u.rad
hm = 2738.0 * u.m
xp = 2.47230737e-7 * u.rad
yp = 1.82640464e-6 * u.rad
refa = 0.000201418779 * u.rad
refb = -2.36140831e-7 * u.rad
astrom = erfa_ufunc.apio(sp.to(u.deg), theta, elong, phi, hm.to(u.km),
xp, yp, refa, refb)
assert astrom.unit == self.astrom_unit
for name, value in [
('along', -0.5278008060295995734),
('xpl', 0.1133427418130752958e-5),
('ypl', 0.1453347595780646207e-5),
('sphi', -0.9440115679003211329),
('cphi', 0.3299123514971474711),
('diurab', 0.5135843661699913529e-6),
('eral', 2.617608903970400427),
('refa', 0.2014187790000000000e-3),
('refb', -0.2361408310000000000e-6)]:
assert_quantity_allclose(astrom[name], value * self.astrom_unit[name],
rtol=1e-12, atol=0*self.astrom_unit[name])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED, DISPATCHED_FUNCTIONS, FUNCTION_HELPERS, IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS, TBD_FUNCTIONS, UNSUPPORTED_FUNCTIONS)
from astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_23
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED,
reason="Needs __array_function__ support")
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
wrapped_functions = {}
for mod in modules:
for name, f in mod.__dict__.items():
if f is np.printoptions:
continue
if callable(f) and hasattr(f, '__wrapped__'):
wrapped_functions[name] = f
return wrapped_functions
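# For example, with numpy >= 1.17, np.concatenate carries ``__wrapped__``
# (set by the __array_function__ dispatcher), so it ends up in
# all_wrapped_functions['concatenate'].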
all_wrapped_functions = get_wrapped_functions(np, np.fft, np.linalg, np.lib.recfunctions)
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
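# A minimal sketch of how coverage gets recorded (hypothetical test class):
#
#     class TestExample(metaclass=CoverageMeta):
#         def test_reshape(self):
#             ...  # 'reshape' is a key in all_wrapped_functions, so
#                  # np.reshape is added to CoverageMeta.covered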
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup(self):
self.q = np.arange(9.).reshape(3, 3) / 4. * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1. * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1. * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1. * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150., 350.]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices,
axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis,
self.q.value) * self.q.unit ** 2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize('axes', ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup(self):
self.q = (np.arange(9.).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value,
axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True],
self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25. * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup(self):
self.q1 = np.arange(6.).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop('q_list', [self.q1, self.q2])
q_ref = kwargs.pop('q_ref', q_list[0])
o = func(q_list, *args, **kwargs)
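        # Convert all inputs to the unit of the reference quantity, using
        # the private Quantity._to_own_unit helper.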
v_list = [q_ref._to_own_unit(q) for q in q_list]
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = np.concatenate(
[self.q1.value, self.q2.to_value(self.q1.unit)]) * self.q1.unit
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0., 1.*u.m], [1.*u.cm, 2.*u.km]])
assert np.all(result == np.block([[0, 1.], [.01, 2000.]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = np.append(self.q1.value, self.q2.to_value(self.q1.unit),
axis=0) * self.q1.unit
assert np.all(out == expected)
a = np.arange(3.)
result = np.append(a, 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50., 25.] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.)
result = np.insert(a, (2,), 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50. * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0. * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1., 6.) * u.m
out = np.pad(q, (2, 3), 'constant', constant_values=(0., 150.*u.cm))
assert out.unit == q.unit
expected = np.pad(q.value, (2, 3), 'constant',
constant_values=(0., 1.5)) * q.unit
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), 'constant', constant_values=150.*u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), 'constant',
constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), 'linear_ramp', end_values=(25.*u.cm, 0.))
assert out3.unit == q.unit
expected3 = np.pad(q.value, (2, 3), 'linear_ramp',
end_values=(0.25, 0.)) * q.unit
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup(self):
self.q = np.arange(54.).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
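    # Reductions that would need a truth value (any/all) or would change the
    # unit per element (prod-like) should raise rather than give wrong units.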
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0., 10., 20.]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
expected = np.clip(self.q.value, qmin.to_value(self.q.unit),
qmax.to_value(self.q.unit)) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.*u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1. * u.km)
expected = np.where([True, False, True], self.q.value,
1000.) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
        # result is 2x3x5; out[0,:,:] is q1 broadcast, out[1,:,:] is q2 broadcast
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select([q < 0.55 * u.m, q > 1. * u.m],
[q, q.to(u.cm)], default=-1. * u.km)
expected = np.select([q.value < 0.55, q.value > 1],
[q.value, q.value], default=-1000) * u.m
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1*u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1., 2.]*u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.*u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q, nan=1.*u.km, posinf=2.*u.km, neginf=-2*u.km)
expected = [-2000., 2000., 1000., 3., 4.] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1. + 1j]*u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1. + 1j]*u.m)
def test_isclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 102., 199.]) * u.cm
atol = 1.5 * u.cm
rtol = 1. * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=atol.to_value(q1.unit))
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
@needs_array_function
def test_allclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 198.]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit),
atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit ** 2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
        # With an axis, count_nonzero returns a plain ndarray of counts,
        # not a Quantity.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
atol = 2 * u.cm
rtol = 1. * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0., rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2*u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.*u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0., 1., 2.]]*3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
class TestNanFunctions(InvariantUnitTestSetup):
def setup(self):
super().setup()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit ** 2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.).reshape(2, 3) * u.m
q2 = np.array([4., 5., 6.]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1., 2., 3.]) * u.m
q2 = np.array([4., 5., 6.]) / u.s
o = np.dot(q1, q2)
assert o == 32. * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32. + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.).reshape(3, 4, 5) * u.m
b = np.arange(24.).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value,
axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.einsum('...i', q1)
assert np.all(o == q1)
o = np.einsum('ii', q1)
expected = np.einsum('ii', q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum('ij,jk', q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum('ij,jk', q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.einsum_path('...i', q1)
assert o[0] == ['einsum_path', (0,)]
o = np.einsum_path('ii', q1)
assert o[0] == ['einsum_path', (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path('ij,jk', q1, q2)
assert o[0] == ['einsum_path', (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10. * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.) * u.m
out = np.diff(x, prepend=-12.5*u.cm, append=1*u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.) * x.unit
assert np.all(out == expected)
x = np.arange(10.) * u.m
out = np.diff(x, prepend=-12.5*u.cm, append=1*u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.,
n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
# Simple gradient works out of the box.
x = np.arange(10.) * u.m
spacing = 10. * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit /
spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2. * u.s
y = [1., 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
        # Note: linspace gets the unit of the end point, which is not
        # entirely logical.
out = np.linspace(1000.*u.m, 10.*u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.).reshape(2, 3) * u.m
q2 = 10000. * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.*u.dex(unit), 20*u.dex(unit), 10)
expected = np.logspace(10., 20., 10) * unit
assert np.all(out == expected)
out = np.logspace(10.*u.STmag, 20*u.STmag, 10)
expected = np.logspace(10., 20., 10, base=10.**(-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.*u.m, 10.*u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1., 7.).reshape(2, 3) * u.m
q2 = 10000. * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250., 2750.]) * u.m
xp = np.arange(5.) * u.km
yp = np.arange(5.) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1*u.s, 1*u.day])
expected = np.piecewise(x.value, [x.value < 0, x.value >= 0],
[-1, 24*3600]) * u.s
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(x, [x < 1 * u.m, x >= 0],
[-1*u.s, 1*u.day, lambda x: 1*u.hour])
expected2 = np.piecewise(x.value, [x.value < 1, x.value >= 0],
[-1, 24*3600, 3600]) * u.s
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(x, [x < 1 * u.m, x >= 0],
[0, 1*u.percent, lambda x: 1*u.one])
expected3 = np.piecewise(x.value, [x.value < 1, x.value >= 0],
[0, 0.01, 1]) * u.one
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500., 2500., 4500.]) * u.m
bins = np.arange(10.) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(self, function, *args, value_args=None, value_kwargs=None,
expected_units=None, **kwargs):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
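        For example::
            self.check(np.histogram, x, [125, 200] * u.cm,
                       value_args=(x.value, [1.25, 2.]),
                       expected_units=(None, x.unit))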
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
        # Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(out[bin_slice],
expected[bin_slice],
expected_units[bin_slice]):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(np.histogram, x,
value_args=(x.value,),
expected_units=(None, x.unit))
# With bins.
self.check(np.histogram, x, [125, 200] * u.cm,
value_args=(x.value, [1.25, 2.]),
expected_units=(None, x.unit))
# With density.
self.check(np.histogram, x, [125, 200] * u.cm, density=True,
value_args=(x.value, [1.25, 2.]),
expected_units=(1/x.unit, x.unit))
# With weights.
self.check(np.histogram, x, [125, 200] * u.cm, weights=weights,
value_args=(x.value, [1.25, 2.]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit))
# With weights and density.
self.check(np.histogram, x, [125, 200] * u.cm,
weights=weights, density=True,
value_args=(x.value, [1.25, 2.]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit/x.unit, x.unit))
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(np.histogram2d, x, y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit))
# Check units with density.
self.check(np.histogram2d, x, y, density=True,
value_args=(x.value, y.value),
expected_units=(1/(x.unit*y.unit), x.unit, y.unit))
# Check units with weights.
self.check(np.histogram2d, x, y, weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit))
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.] * u.m
self.check(np.histogram2d, x, y, [5, inb_y],
value_args=(x.value, y.value,
[5, np.array([0, 2.5, 100.])]),
expected_units=(None, x.unit, y.unit))
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.] * u.percent
self.check(np.histogram2d, x.value, y.value, bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.])]),
expected_units=(None, u.one, u.one))
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@needs_array_function
def test_histogramdd(self):
        # First replicate the histogram2d tests, but using the histogramdd
        # override, which normally takes the sample as a tuple
# with a given number of dimensions, and returns the histogram
# as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(np.histogramdd, sample,
value_args=(sample_values,),
expected_units=(None, sample_units))
# Check units with density.
self.check(np.histogramdd, sample, density=True,
value_args=(sample_values,),
expected_units=(1/(self.x.unit*self.y.unit),
sample_units))
# Check units with weights.
self.check(np.histogramdd, sample, weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units))
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.] * u.m
self.check(np.histogramdd, sample, [5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.])]),
expected_units=(None, sample_units))
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.] * u.percent
self.check(np.histogramdd, sample_values, bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.])]),
expected_units=(None, (u.one, u.one)))
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(np.histogramdd, xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,)*3))
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(np.histogramdd, (xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,)*3))
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m ** 2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m ** 2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
        # Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_msort(self):
self.check(np.msort)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
    # For these, making the behaviour work means deviating only slightly from
    # the docstring; by default they fail miserably, so we might as well.
def setup(self):
self.q = np.arange(3.) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=', ')
expected1 = '[0., 1., 2.]'
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=', ', formatter={'all': str})
expected2 = '[0.0 Jy, 1.0 Jy, 2.0 Jy]'
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(self.q, None, None, None, ', ', '',
np._NoValue, {'float': str})
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=', ', formatter={'int': str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype('f4')
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
    # float quantities are safe, but the integer ones are not.
def setup(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), 'm', dtype='u1')
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, 'f4')
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.) * u.m
q2 = np.arange(5.) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup(self):
self.q = np.array([[0., 1., -1.],
[3., 5., 3.],
[0., 1., -1]]) * u.m
self.q2 = np.array([0., 100., 150., 200.]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop('unit', self.q.unit)
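        # unit=None signals that the function returns plain (e.g. boolean)
        # arrays, so no unit should be attached to the expected output.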
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize('kwargs', (
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True)))
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize('kwargs', (
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1)))
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize('kwargs', (
dict(),
dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0., np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0., 1., 2., np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
        # ediff1d always works, as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.) * u.m
out = np.ediff1d(x, to_begin=-12.5*u.cm, to_end=1*u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.) * x.unit
assert_array_equal(out, expected)
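# Datetime functions make no sense for a float quantity; they should all
# raise TypeError, as checked below.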
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize('function', [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1*u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup(self):
# Use a matrix safe for inversion, etc.
self.q = np.array([[1., -1., 2.],
[0., 3., -1.],
[-1., -1., 1.]]) << u.m
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
        # Use a matrix that is not so well-conditioned, so that tol=1 and
        # tol=0.01 give different ranks.
q = np.arange(9.).reshape(3, 3) / 4 * u.m
tol = 1. * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.) * np.arange(5.)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = np.linalg.pinv(self.q.value,
rcond.to_value(self.q.unit)) / self.q.unit
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, 'unit')
@needs_array_function
def test_solve(self):
b = np.array([1., 2., 4.]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1., 2., 4.]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1., 2., 4.]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value,
rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit ** 2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.*u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1., 1., 1.]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.*u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit ** 0.5
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
        assert u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit('m, (cm, um)')
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
        dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# it works
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# can't structure something that's already structured
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
untested_functions = set()
if NUMPY_LT_1_20:
financial_functions = {f for f in all_wrapped_functions.values()
if f in np.lib.financial.__dict__.values()}
untested_functions |= financial_functions
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar, np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander
}
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.merge_arrays, rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
}
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize('one, two', itertools.combinations(
(SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys())), 2))
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (SUBCLASS_SAFE_FUNCTIONS |
UNSUPPORTED_FUNCTIONS |
set(FUNCTION_HELPERS.keys()) |
set(DISPATCHED_FUNCTIONS.keys()))
assert all_wrapped == included_in_helpers
    # untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
|
25e155277aec2c111231ee61db62d0750c8c40830f522d50d15c9985e0c9c25e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Unit tests for the handling of physical types in `astropy.units`.
"""
import pickle
import pytest
from astropy import units as u
from astropy.constants import hbar
from astropy.units import physical
from astropy.utils.exceptions import AstropyDeprecationWarning
unit_physical_type_pairs = [
(u.m, "length"),
(u.cm ** 3, "volume"),
(u.km / u.h, "speed"),
(u.barn * u.Mpc, "volume"),
(u.m * u.s ** 8, "unknown"),
(u.m / u.m, "dimensionless"),
(hbar.unit, "angular momentum"),
(u.erg / (u.cm ** 2 * u.s * u.AA), "spectral flux density wav"),
(u.photon / (u.cm ** 2 * u.s * u.AA), "photon flux density wav"),
(u.photon / (u.cm ** 2 * u.s * u.Hz), "photon flux density"),
(u.byte, "data quantity"),
(u.bit, "data quantity"),
(u.imperial.mi / u.week, "speed"),
(u.erg / u.s, "power"),
(u.C / u.s, "electrical current"),
(u.C / u.s / u.cm ** 2, "electrical current density"),
(u.T * u.m ** 2, "magnetic flux"),
(u.N * u.m, "energy"),
(u.rad / u.ms, "angular speed"),
(u.Unit(1), "dimensionless"),
(u.m ** 2, "area"),
(u.s, "time"),
(u.rad, "angle"),
(u.sr, "solid angle"),
(u.m / u.s ** 2, "acceleration"),
(u.Hz, "frequency"),
(u.g, "mass"),
(u.mol, "amount of substance"),
(u.K, "temperature"),
(u.deg_C, "temperature"),
(u.imperial.deg_F, "temperature"),
(u.imperial.deg_R, "temperature"),
(u.imperial.deg_R / u.m, "temperature_gradient"),
(u.N, "force"),
(u.J, "energy"),
(u.Pa, "pressure"),
(u.W, "power"),
(u.kg / u.m ** 3, "mass density"),
(u.m ** 3 / u.kg, "specific volume"),
(u.mol / u.m ** 3, "molar concentration"),
(u.kg * u.m / u.s, "momentum/impulse"),
(u.kg * u.m ** 2 / u.s, "angular momentum"),
(u.rad / u.s, "angular speed"),
(u.rad / u.s ** 2, "angular acceleration"),
(u.g / (u.m * u.s), "dynamic viscosity"),
(u.m ** 2 / u.s, "kinematic viscosity"),
(u.m ** -1, "wavenumber"),
(u.A, "electrical current"),
(u.C, "electrical charge"),
(u.V, "electrical potential"),
(u.Ohm, "electrical resistance"),
(u.S, "electrical conductance"),
(u.F, "electrical capacitance"),
(u.C * u.m, "electrical dipole moment"),
(u.A / u.m ** 2, "electrical current density"),
(u.V / u.m, "electrical field strength"),
(u.C / u.m ** 2, "electrical flux density"),
(u.C / u.m ** 3, "electrical charge density"),
(u.F / u.m, "permittivity"),
(u.Wb, "magnetic flux"),
(u.T, "magnetic flux density"),
(u.A / u.m, "magnetic field strength"),
(u.H / u.m, "electromagnetic field strength"),
(u.H, "inductance"),
(u.cd, "luminous intensity"),
(u.lm, "luminous flux"),
(u.lx, "luminous emittance/illuminance"),
(u.W / u.sr, "radiant intensity"),
(u.cd / u.m ** 2, "luminance"),
(u.astrophys.Jy, "spectral flux density"),
(u.astrophys.R, "photon flux"),
(u.misc.bit, "data quantity"),
(u.misc.bit / u.s, "bandwidth"),
(u.cgs.Franklin, "electrical charge (ESU)"),
(u.cgs.statampere, "electrical current (ESU)"),
(u.cgs.Biot, "electrical current (EMU)"),
(u.cgs.abcoulomb, "electrical charge (EMU)"),
(u.imperial.btu / (u.s * u.m * u.imperial.deg_F), "thermal conductivity"),
(u.imperial.cal / u.deg_C, "heat capacity"),
(u.imperial.cal / u.deg_C / u.g, "specific heat capacity"),
(u.J * u.m ** -2 * u.s ** -1, "energy flux"),
(u.W / u.m ** 2, "energy flux"),
(u.m ** 3 / u.mol, "molar volume"),
(u.m / u.S, "electrical resistivity"),
(u.S / u.m, "electrical conductivity"),
(u.A * u.m ** 2, "magnetic moment"),
(u.J / u.T, "magnetic moment"),
(u.yr ** -1 * u.Mpc ** -3, "volumetric rate"),
(u.m / u.s ** 3, "jerk"),
(u.m / u.s ** 4, "snap"),
(u.m / u.s ** 5, "crackle"),
(u.m / u.s ** 6, "pop"),
(u.deg_C / u.m, "temperature gradient"),
(u.imperial.deg_F / u.m, "temperature gradient"),
(u.imperial.deg_R / u.imperial.ft, "temperature gradient"),
(u.imperial.Calorie / u.g, "specific energy"),
(u.mol / u.L / u.s, "reaction rate"),
(u.imperial.lbf * u.imperial.ft * u.s ** 2, "moment of inertia"),
(u.mol / u.s, "catalytic activity"),
(u.imperial.kcal / u.deg_C / u.mol, "molar heat capacity"),
(u.mol / u.kg, "molality"),
(u.imperial.inch * u.hr, "absement"),
(u.imperial.ft ** 3 / u.s, "volumetric flow rate"),
(u.Hz / u.s, "frequency drift"),
(u.Pa ** -1, "compressibility"),
(u.dimensionless_unscaled, "dimensionless"),
]
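# For illustration, each pair above asserts behaviour like the following
# (a sketch, not executed here):
#     >>> (u.km / u.h).physical_type == "speed"
#     True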
@pytest.mark.parametrize("unit, physical_type", unit_physical_type_pairs)
def test_physical_type_names(unit, physical_type):
"""
Test that the `physical_type` attribute of `u.Unit` objects provides
the expected physical type for various units.
Many of these tests are used to test backwards compatibility.
"""
assert unit.physical_type == physical_type, (
f"{unit!r}.physical_type was expected to return "
f"{physical_type!r}, but instead returned {unit.physical_type!r}."
)
length = u.m.physical_type
time = u.s.physical_type
speed = (u.m / u.s).physical_type
area = (u.m ** 2).physical_type
wavenumber = (u.m ** -1).physical_type
dimensionless = u.dimensionless_unscaled.physical_type
pressure = u.Pa.physical_type
momentum = (u.kg * u.m / u.s).physical_type
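# Module-level PhysicalType handles used by the operation, representation
# and hashing tests below.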
@pytest.mark.parametrize(
"physical_type_representation, physical_type_name", [
(1.0, "dimensionless"),
(u.m, "length"),
("work", "work"),
(5 * u.m, "length"),
(length, length),
(u.Pa, "energy_density"), # attribute-accessible name
("energy_density", "energy_density") # attribute-accessible name
],
)
def test_getting_physical_type(physical_type_representation, physical_type_name):
"""Test different ways of getting a physical type."""
physical_type = physical.get_physical_type(physical_type_representation)
assert isinstance(physical_type, physical.PhysicalType)
assert physical_type == physical_type_name
@pytest.mark.parametrize(
"argument, exception", [
("unknown", ValueError),
("not a name of a physical type", ValueError),
({"this set cannot be made into a Quantity"}, TypeError),
]
)
def test_getting_physical_type_exceptions(argument, exception):
"""
Test that `get_physical_type` raises appropriate exceptions when
provided with invalid arguments.
"""
with pytest.raises(exception):
physical.get_physical_type(argument)
def test_physical_type_cannot_become_quantity():
"""
Test that `PhysicalType` instances cannot be cast into `Quantity`
objects. A failure in this test could be related to failures
in subsequent tests.
"""
with pytest.raises(TypeError):
u.Quantity(u.m.physical_type, u.m)
# left term, right term, operator, expected value
operation_parameters = [
(length, length, "__eq__", True),
(length, area, "__eq__", False),
(length, "length", "__eq__", True),
("length", length, "__eq__", NotImplemented),
(dimensionless, dimensionless, "__eq__", True),
(momentum, "momentum/impulse", "__eq__", True), # test delimiters in names
(pressure, "energy_density", "__eq__", True), # test underscores in names
((u.m ** 8).physical_type, "unknown", "__eq__", True),
((u.m ** 8).physical_type, (u.m ** 9).physical_type, "__eq__", False),
(length, length, "__ne__", False),
(speed, time, "__ne__", True),
(pressure, dimensionless, "__ne__", True),
(length, u.m, "__eq__", NotImplemented),
(length, length, "__mul__", area),
(speed, time, "__mul__", length),
(speed, time, "__rmul__", length),
(length, time, "__truediv__", speed),
(area, length, "__truediv__", length),
(length, area, "__rtruediv__", length),
(dimensionless, dimensionless, "__mul__", dimensionless),
(dimensionless, dimensionless, "__truediv__", dimensionless),
(length, 2, "__pow__", area),
(area, 0.5, "__pow__", length),
(dimensionless, 4, "__pow__", dimensionless),
(u.m, length, "__mul__", NotImplemented),
(3.2, length, "__mul__", NotImplemented),
(u.m, time, "__truediv__", NotImplemented),
(3.2, length, "__truediv__", NotImplemented),
(length, u.m, "__mul__", area),
(length, u.m, "__rmul__", area),
(speed, u.s, "__mul__", length),
(length, 1, "__mul__", length),
(length, 1, "__rmul__", length),
(length, u.s, "__truediv__", speed),
(area, 1, "__truediv__", area),
(time, u.m, "__rtruediv__", speed),
(length, 1.0, "__rtruediv__", wavenumber),
(length, 2, "__pow__", area),
(length, 32, "__mul__", NotImplemented),
(length, 0, "__rmul__", NotImplemented),
(length, 3.2, "__truediv__", NotImplemented),
(length, -1, "__rtruediv__", NotImplemented),
(length, "length", "__mul__", area),
(length, "length", "__rmul__", area),
(area, "length", "__truediv__", length),
(length, "area", "__rtruediv__", length),
]
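# A concrete reading of two of the rows above (illustrative only):
#     >>> length * length == area
#     True
#     >>> length ** 2 == area
#     True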
@pytest.mark.parametrize("left, right, operator, expected", operation_parameters)
def test_physical_type_operations(left, right, operator, expected):
"""
Test that `PhysicalType` dunder methods that require another
argument behave as intended.
"""
assert getattr(left, operator)(right) == expected
unit_with_physical_type_set = [
(u.m, {"length"}),
(u.kg * u.m / u.s, {"impulse", "momentum"}),
(u.Pa, {"energy density", "pressure", "stress"}),
]
@pytest.mark.parametrize("unit, expected_set", unit_with_physical_type_set)
def test_physical_type_as_set(unit, expected_set):
"""Test making a `physical.PhysicalType` instance into a `set`."""
resulting_set = set(unit.physical_type)
assert resulting_set == expected_set
def test_physical_type_iteration():
"""Test iterating through different physical type names."""
physical_type_names = [physical_type_name for physical_type_name in pressure]
assert physical_type_names == ["energy density", "pressure", "stress"]
def test_physical_type_in():
"""
Test that `in` works as expected for `PhysicalType` objects with one
or multiple names.
"""
assert "length" in length
assert "pressure" in pressure
equivalent_unit_pairs = [
(u.m, u.m),
(u.m, u.cm),
(u.N, u.kg * u.m * u.s ** -2),
(u.barn * u.Mpc, u.cm ** 3),
(u.K, u.deg_C),
(u.K, u.imperial.deg_R),
(u.K, u.imperial.deg_F),
(u.deg_C, u.imperial.deg_F),
(u.m ** 18, u.pc ** 18),
]
@pytest.mark.parametrize("unit1, unit2", equivalent_unit_pairs)
def test_physical_type_instance_equality(unit1, unit2):
"""
Test that `physical.PhysicalType` instances for units of the same
dimensionality are equal.
"""
assert (unit1.physical_type == unit2.physical_type) is True
assert (unit1.physical_type != unit2.physical_type) is False
@pytest.mark.parametrize("unit1, unit2", equivalent_unit_pairs)
def test_get_physical_type_equivalent_pairs(unit1, unit2):
"""
Test that `get_physical_type` retrieves the same `PhysicalType`
instances for equivalent physical types, except for unknown types
which are not cataloged.
"""
physical_type1 = physical.get_physical_type(unit1)
physical_type2 = physical.get_physical_type(unit2)
assert physical_type1 == physical_type2
if physical_type1 != "unknown":
assert physical_type1 is physical_type2
nonequivalent_unit_pairs = [
(u.m, u.s),
(u.m ** 18, u.m ** 19),
(u.N, u.J),
(u.barn, u.imperial.deg_F),
]
@pytest.mark.parametrize("unit1, unit2", nonequivalent_unit_pairs)
def test_physical_type_instance_inequality(unit1, unit2):
"""
Test that `physical.PhysicalType` instances for units with different
dimensionality are considered unequal.
"""
physical_type1 = physical.PhysicalType(unit1, "ptype1")
physical_type2 = physical.PhysicalType(unit2, "ptype2")
assert (physical_type1 != physical_type2) is True
assert (physical_type1 == physical_type2) is False
physical_type_with_expected_str = [
(length, "length"),
(speed, "speed/velocity"),
(pressure, "energy density/pressure/stress"),
(u.deg_C.physical_type, "temperature"),
((u.J / u.K / u.kg).physical_type, "specific entropy/specific heat capacity"),
]
physical_type_with_expected_repr = [
(length, "PhysicalType('length')"),
(speed, "PhysicalType({'speed', 'velocity'})"),
(pressure, "PhysicalType({'energy density', 'pressure', 'stress'})"),
(u.deg_C.physical_type, "PhysicalType('temperature')"),
((u.J / u.K / u.kg).physical_type,
"PhysicalType({'specific entropy', 'specific heat capacity'})"),
]
@pytest.mark.parametrize("physical_type, expected_str", physical_type_with_expected_str)
def test_physical_type_str(physical_type, expected_str):
"""Test using `str` on a `PhysicalType` instance."""
assert str(physical_type) == expected_str
@pytest.mark.parametrize(
"physical_type, expected_repr", physical_type_with_expected_repr
)
def test_physical_type_repr(physical_type, expected_repr):
"""Test using `repr` on a `PhysicalType` instance."""
assert repr(physical_type) == expected_repr
def test_physical_type_hash():
"""Test that a `PhysicalType` instance can be used as a dict key."""
dictionary = {length: 42}
assert dictionary[length] == 42
@pytest.mark.parametrize("multiplicand", [list(), 42, 0, -1])
def test_physical_type_multiplication(multiplicand):
"""
Test that multiplication of a physical type returns `NotImplemented`
when attempted for an invalid type.
"""
with pytest.raises(TypeError):
length * multiplicand
def test_unrecognized_unit_physical_type():
"""
Test basic functionality for the physical type of an unrecognized
unit.
"""
unrecognized_unit = u.Unit("parrot", parse_strict="silent")
physical_type = unrecognized_unit.physical_type
assert isinstance(physical_type, physical.PhysicalType)
assert physical_type == "unknown"
invalid_inputs = [(42,), ("valid input", 42)]
@pytest.mark.parametrize("invalid_input", invalid_inputs)
def test_invalid_physical_types(invalid_input):
"""
Test that `PhysicalType` cannot be instantiated when one of the
supplied names is not a string, while making sure that the physical
type for the unit remains unknown.
"""
obscure_unit = u.s ** 87
with pytest.raises(ValueError):
physical.PhysicalType(obscure_unit, invalid_input)
assert obscure_unit.physical_type == "unknown"
class TestDefPhysType:
weird_unit = u.m ** 99
strange_unit = u.s ** 42
def test_attempt_to_define_unknown_physical_type(self):
"""Test that a unit cannot be defined as unknown."""
with pytest.raises(ValueError):
physical.def_physical_type(self.weird_unit, "unknown")
assert "unknown" not in physical._unit_physical_mapping
def test_multiple_same_physical_type_names(self):
"""
Test that `def_physical_type` raises an exception when it tries to
set the physical type of a new unit as the name of an existing
physical type.
"""
with pytest.raises(ValueError):
physical.def_physical_type(self.weird_unit, {"time", "something"})
assert self.weird_unit.physical_type == "unknown"
def test_expanding_names_for_physical_type(self):
"""
Test that calling `def_physical_type` on an existing physical
type adds a new physical type name.
"""
weird_name = "weird name"
strange_name = "strange name"
        try:
            physical.def_physical_type(self.weird_unit, weird_name)
            assert (
                self.weird_unit.physical_type == weird_name
            ), f"unable to set physical type for {self.weird_unit}"
        finally:  # cleanup added name
physical._attrname_physical_mapping.pop(weird_name.replace(' ', '_'), None)
physical._name_physical_mapping.pop(weird_name, None)
# add both strange_name and weird_name
        try:
            physical.def_physical_type(self.weird_unit, strange_name)
            assert set(self.weird_unit.physical_type) == {
                weird_name,
                strange_name,
            }, "did not correctly append a new physical type name."
        finally:  # cleanup added names
physical._attrname_physical_mapping.pop(strange_name.replace(' ', '_'), None)
physical._name_physical_mapping.pop(strange_name, None)
physical._attrname_physical_mapping.pop(weird_name.replace(' ', '_'), None)
physical._name_physical_mapping.pop(weird_name, None)
def test_redundant_physical_type(self):
"""
Test that a physical type name already in use cannot be assigned
for another unit (excluding `"unknown"`).
"""
with pytest.raises(ValueError):
physical.def_physical_type(self.weird_unit, "length")
@staticmethod
def _undef_physical_type(unit):
"""Reset the physical type of unit to "unknown"."""
for name in list(unit.physical_type):
del physical._unit_physical_mapping[name]
del physical._physical_unit_mapping[unit._get_physical_type_id()]
assert unit.physical_type == "unknown"
def teardown_method(self):
"""
Remove the definitions of the physical types that were added
using `def_physical_unit` for testing purposes.
"""
for unit in [self.weird_unit, self.strange_unit]:
physical_type = physical.get_physical_type(unit)
if physical_type != "unknown":
self._undef_physical_type(unit)
assert unit.physical_type == "unknown", (
f"the physical type for {unit}, which was added for"
f"testing, was not deleted."
)
@pytest.mark.parametrize("method, expected", [
("title", 'Length'), ("isalpha", True), ("isnumeric", False), ("upper", 'LENGTH')
])
def test_that_str_methods_work_with_physical_types(method, expected):
"""
Test that str methods work for `PhysicalType` instances while issuing
a deprecation warning.
"""
with pytest.warns(AstropyDeprecationWarning, match="PhysicalType instances"):
result_of_method_call = getattr(length, method)()
assert result_of_method_call == expected
def test_missing_physical_type_attribute():
"""
Test that a missing attribute raises an `AttributeError`.
This test should be removed when the deprecated option of calling
string methods on PhysicalType instances is removed from
`PhysicalType.__getattr__`.
"""
with pytest.raises(AttributeError):
length.not_the_name_of_a_str_or_physical_type_attribute
@pytest.mark.parametrize('ptype_name', ['length', 'speed', 'entropy'])
def test_pickling(ptype_name):
# Regression test for #11685
ptype = u.get_physical_type(ptype_name)
pkl = pickle.dumps(ptype)
other = pickle.loads(pkl)
assert other == ptype
def test_physical_types_module_access():
# all physical type names in dir
assert set(dir(physical)).issuperset(physical._attrname_physical_mapping.keys())
assert set(dir(physical)).issuperset(physical.__all__)
# all physical type can be accessed by name
for pname in physical._attrname_physical_mapping.keys():
ptype = physical._attrname_physical_mapping[pname]
assert hasattr(physical, pname) # make sure works in lazy load
assert getattr(physical, pname) is ptype
# a failed access
with pytest.raises(AttributeError, match="has no attribute"):
physical.not_a_valid_physical_type_name
|
7f2b2bacc519610c9d46d9db6a9e419e9396edb022919af96c02e9d3568697e7 | # The purpose of these tests are to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import concurrent.futures
import warnings
from collections import namedtuple
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy.units.quantity_helper.converters import UfuncHelpers
from astropy.units.quantity_helper.helpers import helper_sqrt
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
testcase = namedtuple('testcase', ['f', 'q_in', 'q_out'])
testexc = namedtuple('testexc', ['f', 'q_in', 'exc', 'msg'])
testwarn = namedtuple('testwarn', ['f', 'q_in', 'wfilter'])
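# ``testcase``, ``testexc`` and ``testwarn`` bundle a ufunc together with its
# Quantity inputs and the expected outputs, exception or warning filter.
# They are consumed by the three skip-marked helper functions below, which
# the parametrized ``test_*`` methods call on each tuple.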
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
# careful of the following line, would break on a function returning
# a single tuple (as opposed to tuple of return values)
results = (results, ) if type(results) != tuple else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.E-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
class TestUfuncHelpers:
# Note that this test should work even if scipy is present, since
# the scipy.special ufuncs are only loaded on demand.
# The test passes independently of whether erfa is already loaded
# (which will be the case for a full test, since coordinates uses it).
def test_coverage(self):
"""Test that we cover all ufunc's"""
all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)])
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy or erfa.
# (Since coverage for erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = set([ufunc for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)])
assert (all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set())
def test_scipy_registered(self):
# Should be registered as existing even if scipy is not available.
assert 'scipy.special' in qh.UFUNC_HELPERS.modules
def test_removal_addition(self):
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = None
assert np.add not in qh.UFUNC_HELPERS
assert np.add in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
@pytest.mark.slow
def test_thread_safety(self, fast_thread_switching):
def dummy_ufunc(*args, **kwargs):
return np.sqrt(*args, **kwargs)
def register():
return {dummy_ufunc: helper_sqrt}
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
helpers = UfuncHelpers()
helpers.register_module(
'astropy.units.tests.test_quantity_ufuncs',
['dummy_ufunc'],
register
)
futures = [executor.submit(lambda: helpers[dummy_ufunc]) for i in range(workers)]
values = [future.result() for future in futures]
assert values == [helper_sqrt] * workers
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize('tc', (
testcase(
f=np.sin,
q_in=(30. * u.degree, ),
q_out=(0.5*u.dimensionless_unscaled, )
),
testcase(
f=np.sin,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([0., 1. / np.sqrt(2.), 1.]) * u.one, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(30. * u.degree), ),
q_out=(np.radians(30.) * u.radian, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, )
),
testcase(
f=np.cos,
q_in=(np.pi / 3. * u.radian, ),
q_out=(0.5 * u.dimensionless_unscaled, )
),
testcase(
f=np.cos,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([1., 1. / np.sqrt(2.), 0.]) * u.one, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
),
testcase(
f=np.tan,
q_in=(np.pi / 3. * u.radian, ),
q_out=(np.sqrt(3.) * u.dimensionless_unscaled, )
),
testcase(
f=np.tan,
q_in=(np.array([0., 45., 135., 180.]) * u.degree, ),
q_out=(np.array([0., 1., -1., 0.]) * u.dimensionless_unscaled, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10., 30., 70., 80.]) * u.degree), ),
q_out=(np.radians(np.array([10., 30., 70., 80.]) * u.degree), )
),
testcase(
f=np.arctan2,
q_in=(np.array([10., 30., 70., 80.]) * u.m, 2.0 * u.km),
q_out=(np.arctan2(np.array([10., 30., 70., 80.]),
2000.) * u.radian, )
),
testcase(
f=np.arctan2,
q_in=((np.array([10., 80.]) * u.m / (2.0 * u.km)).to(u.one), 1.),
q_out=(np.arctan2(np.array([10., 80.]) / 2000., 1.) * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.radians,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.radians,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.rad2deg,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.rad2deg,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
)
))
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize('te', (
testexc(
f=np.deg2rad,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.radians,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
        testexc(
            f=np.rad2deg,
            q_in=(3. * u.m, ),
            exc=TypeError,
            msg=None
        ),
        testexc(
            f=np.degrees,
            q_in=(3. * u.m, ),
            exc=TypeError,
            msg=None
        ),
testexc(
f=np.sin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units"
),
testexc(
f=np.arcsin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities"
),
testexc(
f=np.cos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units"
),
testexc(
f=np.arccos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities"
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units"
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1. * u.s),
exc=u.UnitsError,
msg="compatible dimensions"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.),
exc=u.UnitsError,
msg="dimensionless quantities when other arg"
)
))
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize('tw', (
testwarn(
f=np.arcsin,
q_in=(27. * u.pc / (15 * u.kpc), ),
wfilter='error'
),
))
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.skipif(not isinstance(getattr(np, 'matmul', None), np.ufunc),
reason="np.matmul is not yet a gufunc")
def test_matmul(self):
q = np.arange(3.) * u.m
r = np.matmul(q, q)
assert r == 5. * u.m ** 2
# less trivial case.
q1 = np.eye(3) * u.m
q2 = np.array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]) / u.s
r2 = np.matmul(q1, q2)
assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
function(np.arange(3.), 2.) * u.m / u.s)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1., 2., 3.]) * u.m
divisor = np.array([3., 4., 5.]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
assert_allclose(quotient.value, [13., 19., 23.])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
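    # For reference: with inch defined as 0.0254 m, 1 m // (3 inch) is
    # floor(1 / 0.0762) == 13, so the quotient above is a pure number while
    # the remainder keeps the dividend's unit (1 - 13 * 0.0762 == 0.0094 m).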
def test_sqrt_scalar(self):
assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5
def test_sqrt_array(self):
assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
== np.array([1., 2., 3.]) * u.m ** 0.5)
def test_square_scalar(self):
assert np.square(4. * u.m) == 16. * u.m ** 2
def test_square_array(self):
assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
== np.array([1., 4., 9.]) * u.m ** 2)
def test_reciprocal_scalar(self):
assert np.reciprocal(4. * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
== np.array([1., 0.5, 0.25]) / u.m)
def test_heaviside_scalar(self):
assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert np.heaviside(0. * u.s,
25 * u.percent) == 0.25 * u.dimensionless_unscaled
assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1., 0., 0., +1.])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(np.heaviside(values * u.m,
halfway * u.dimensionless_unscaled) ==
[0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_scalar(self, function):
assert function(8. * u.m**3) == 2. * u.m
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_array(self, function):
        # Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4. See #4388.
values = np.array([1., 8., 64.])
assert np.all(function(values * u.m**3) ==
function(values) * u.m)
def test_power_scalar(self):
assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
assert np.power(4., 200. * u.cm / u.m) == \
u.Quantity(16., u.dimensionless_unscaled)
# regression check on #1696
assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
def test_float_power_array(self):
assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.float_power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
def test_power_array_array(self):
with pytest.raises(ValueError):
np.power(4. * u.m, [2., 4.])
def test_power_array_array2(self):
with pytest.raises(ValueError):
np.power([2., 4.] * u.m, [2., 4.])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2., 4.] * u.m / u.m
powers = [2., 4.]
res = np.power(q, powers)
assert np.all(res.value == q.value ** powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2., 4.] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2 ** 2
assert np.all(res3.value == q2.value ** 2)
assert res3.unit == q2.unit ** 2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError) as exc:
np.power(3., 4. * u.m)
assert "raise something to a dimensionless" in exc.value.args[0]
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.) == 3. * u.m
assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
assert np.copysign(3 * u.m, -1.) == -3. * u.m
assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m
def test_copysign_array(self):
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s,
np.array([-2., 2., -4.]) * u.m) ==
np.array([-1., 2., -3.]) * u.s)
q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
assert np.all(q == np.array([-1., -2., -3.]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4. * u.m, 2) == 16. * u.m
def test_ldexp_array(self):
assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
== np.array([8., 8., 6.]) * u.m)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3. * u.m, 4.)
with pytest.raises(TypeError):
np.ldexp(3., u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_scalar(self, function):
q = function(3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value
== function(np.array([1. / 3., 1. / 2., 1.])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{}' function to "
"dimensionless quantities"
.format(function.__name__))
def test_modf_scalar(self):
q = np.modf(9. * u.m / (600. * u.cm))
assert q == (0.5 * u.dimensionless_unscaled,
1. * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.) * u.m / (500. * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3. * u.m / (6. * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
def test_frexp_invalid_units(self):
        # Can't use frexp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
np.frexp(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
# also does not work on quantities that can be made dimensionless
with pytest.raises(TypeError) as exc:
np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
function(np.array([100. / 3., 100. / 2., 100.]), 1.))
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(TypeError) as exc:
function(1. * u.km / u.s, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{}' function to "
"dimensionless quantities"
.format(function.__name__))
class TestInvariantUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
np.conj, np.conjugate,
np.negative, np.spacing, np.rint,
np.floor, np.ceil, np.positive])
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
np.negative, np.rint,
np.floor, np.ceil])
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc', 'arbitrary'), [
(np.add, 0.), (np.subtract, 0.), (np.hypot, 0.),
(np.maximum, 0.), (np.minimum, 0.), (np.nextafter, 0.),
(np.remainder, np.inf), (np.mod, np.inf), (np.fmod, np.inf)])
def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i1, arbitrary)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestComparisonUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(q_o2 == ufunc((q_i1 / q_i2)
.to_value(u.dimensionless_unscaled), 2.))
# comparison with 0., inf, nan is OK even for dimensional quantities
# (though ignore numpy runtime warnings for comparisons with nan).
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
for arbitrary_unit_value in (0., np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0., np.inf, np.nan]))
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
@pytest.mark.parametrize('ufunc', (np.isfinite, np.isinf, np.isnan,
np.signbit))
def test_onearg_test_ufuncs(self, ufunc):
q = [1., np.inf, -np.inf, np.nan, -1., 0.] * u.m
out = ufunc(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings('ignore:.*invalid value encountered in sign')
def test_sign(self):
q = [1., np.inf, -np.inf, np.nan, -1., 0.] * u.m
out = np.sign(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) |
(np.isnan(out) & np.isnan(q.value)))
class TestInplaceUfuncs:
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value/10., out=s)
assert check is s
assert np.all(check.value == np.arcsin(value/10.))
assert check.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100. * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# can also replace input with first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.
assert check is s
assert np.all(check.value == value / 2.)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2. * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1. * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2. / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1., 2., 3.]) * u.dimensionless_unscaled
np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert np.all(s.value == np.array([3., 6., 9.]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert_allclose(s.value, np.arctan2(1., 2.))
assert s.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.*u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1. * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
First two tests that check that float32 is kept close #3976.
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += (20.*u.km)
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.parametrize('ufunc', (np.equal, np.greater))
def test_comparison_ufuncs_inplace(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
check = np.empty(q_i1.shape, bool)
ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
result = np.empty(q_i1.shape, bool)
q_o = ufunc(q_i1, q_i2, out=result)
assert q_o is result
assert type(q_o) is np.ndarray
assert q_o.dtype == bool
assert np.all(q_o == check)
@pytest.mark.parametrize('ufunc', (np.isfinite, np.signbit))
def test_onearg_test_ufuncs_inplace(self, ufunc):
q = [1., np.inf, -np.inf, np.nan, -1., 0.] * u.m
check = np.empty(q.shape, bool)
ufunc(q.value, out=check)
result = np.empty(q.shape, bool)
out = ufunc(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings('ignore:.*invalid value encountered in sign')
def test_sign_inplace(self):
q = [1., np.inf, -np.inf, np.nan, -1., 0.] * u.m
check = np.empty(q.shape, q.dtype)
np.sign(q.value, out=check)
result = np.empty(q.shape, q.dtype)
out = np.sign(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) |
(np.isnan(out) & np.isnan(q.value)))
@pytest.mark.skipif(not hasattr(np.core.umath, 'clip'),
reason='no clip ufunc available')
class TestClip:
"""Test the clip ufunc.
    In numpy, this is hidden behind a function that performs
    backwards-compatibility checks. We explicitly test the ufunc here.
"""
def setup(self):
self.clip = np.core.umath.clip
def test_clip_simple(self):
q = np.arange(-1., 10.) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
result = self.clip(q, q_min, q_max)
assert result.unit == q.unit
expected = self.clip(q.value, q_min.to_value(q.unit),
q_max.to_value(q.unit)) * q.unit
assert np.all(result == expected)
def test_clip_unitless_parts(self):
q = np.arange(-1., 10.) * u.m
qlim = 0.0055 * u.km
# one-sided
result1 = self.clip(q, -np.inf, qlim)
expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
assert np.all(result1 == expected1)
result2 = self.clip(q, qlim, np.inf)
expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
assert np.all(result2 == expected2)
# Zero
result3 = self.clip(q, np.zeros(q.shape), qlim)
expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
assert np.all(result3 == expected3)
# Two unitless parts, array-shaped.
result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
expected4 = self.clip(q.value, 0, np.inf) * q.unit
assert np.all(result4 == expected4)
def test_clip_dimensionless(self):
q = np.arange(-1., 10.) * u.dimensionless_unscaled
result = self.clip(q, 200 * u.percent, 5.)
expected = self.clip(q, 2., 5.)
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_ndarray(self):
a = np.arange(-1., 10.)
result = self.clip(a, 200 * u.percent, 5. * u.dimensionless_unscaled)
assert isinstance(result, u.Quantity)
expected = self.clip(a, 2., 5.) * u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_quantity_inplace(self):
q = np.arange(-1., 10.) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
expected = self.clip(q.value, q_min.to_value(q.unit),
q_max.to_value(q.unit)) * q.unit
result = self.clip(q, q_min, q_max, out=q)
assert result is q
assert np.all(result == expected)
def test_clip_ndarray_dimensionless_output(self):
a = np.arange(-1., 10.)
q = np.zeros_like(a) * u.m
expected = self.clip(a, 2., 5.) * u.dimensionless_unscaled
result = self.clip(a, 200 * u.percent, 5. * u.dimensionless_unscaled,
out=q)
assert result is q
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_errors(self):
q = np.arange(-1., 10.) * u.m
with pytest.raises(u.UnitsError):
self.clip(q, 0, 1*u.s)
with pytest.raises(u.UnitsError):
self.clip(q.value, 0, 1*u.s)
with pytest.raises(u.UnitsError):
self.clip(q, -1, 0.)
with pytest.raises(u.UnitsError):
self.clip(q, 0., 1.)
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.*u.km)
np.add.at(check, i, 1000.)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.*u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1*u.s)
# but be fine if it does not
s = np.arange(10.) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.*u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.) * u.m
np.multiply.at(s, i, 2.)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.*u.km)
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.) * u.m
s2 = np.arange(2.) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
if HAS_SCIPY:
from scipy import special as sps
erf_like_ufuncs = (
sps.erf, sps.erfc, sps.erfcx, sps.erfi,
sps.gamma, sps.gammaln, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.digamma, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10)
if isinstance(sps.erfinv, np.ufunc):
erf_like_ufuncs += (sps.erfinv, sps.erfcinv)
def test_scipy_registration():
"""Check that scipy gets loaded upon first use."""
assert sps.erf not in qh.UFUNC_HELPERS
sps.erf(1. * u.percent)
assert sps.erf in qh.UFUNC_HELPERS
if isinstance(sps.erfinv, np.ufunc):
assert sps.erfinv in qh.UFUNC_HELPERS
else:
assert sps.erfinv not in qh.UFUNC_HELPERS
class TestScipySpecialUfuncs:
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize('function', (sps.radian, ))
def test_radian(self, function):
q1 = function(180. * u.degree, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0. * u.degree, 30. * u.arcmin, 0. * u.arcsec)
assert_allclose(q2.value, (30. * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0. * u.degree, 0. * u.arcmin, 30. * u.arcsec)
assert_allclose(q3.value, (30. * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3. * u.radian, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q4.value, 3.)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3. * u.m, 2. * u.s, 1. * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2. * u.m / (2. * u.m), 3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_array(self, function):
q = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(
np.ones(3),
np.array([1. / 3., 1. / 2., 1.]))
)
# should also work on quantities that can be made dimensionless
q2 = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.ones(3),
np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(1. * u.kg, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{}' function to "
"dimensionless quantities"
.format(function.__name__))
|
aa8fac3bd9b7b2d2c2a42726675856dd3b03fd98c12ffca58813bf41afd1dff1 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import sys
import typing
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy import units as u
from astropy.units._typing import HAS_ANNOTATED
# list of pairs (target unit/physical type, input unit)
x_inputs = [(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s),
([u.arcsec, u.km], u.deg), ([u.arcsec, u.km], u.km), # multiple allowed
(['angle', 'length'], u.deg), (['angle', 'length'], u.km)]
y_inputs = [(u.m, u.km), (u.km, u.m),
(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s)]
@pytest.fixture(scope="module",
params=list(range(len(x_inputs))))
def x_input(request):
return x_inputs[request.param]
@pytest.fixture(scope="module",
params=list(range(len(y_inputs))))
def y_input(request):
return y_inputs[request.param]
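# Each fixture yields a (target, input unit) pair: the target handed to
# quantity_input may be a unit, a physical-type string, or a list of allowed
# alternatives, while the input unit is what the test actually passes in.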
# ---- Tests that use the fixtures defined above ----
def test_args(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 1*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_args_nonquantity(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 100)
assert isinstance(x, u.Quantity)
assert isinstance(y, int)
assert x.unit == x_unit
def test_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(u.UnitsError) as e:
        x, y = myfunc_args(1*x_unit, 100*u.Joule)  # u.Joule is not convertible to any allowed target
str_to = str(y_target)
assert str(e.value) == f"Argument 'y' to function 'myfunc_args' must be in units convertible to '{str_to}'."
def test_wrong_unit_annotated(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input
def myfunc_args(x: x_target, y: y_target):
return x, y
with pytest.raises(u.UnitsError, match="Argument 'y' to function 'myfunc_args'"):
        x, y = myfunc_args(1*x_unit, 100*u.Joule)  # u.Joule is not convertible to any allowed target
def test_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, 100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You should pass in an astropy Quantity instead."
def test_not_quantity_annotated(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input
def myfunc_args(x: x_target, y: y_target):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, 100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You should pass in an astropy Quantity instead."
def test_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg, y=1*y_unit):
return x, my_arg, y
x, my_arg, y = myfunc_args(1*x_unit, 100, y=100*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg, int)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
def test_unused_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg1, y=y_unit, my_arg2=1000):
return x, my_arg1, y, my_arg2
x, my_arg1, y, my_arg2 = myfunc_args(1*x_unit, 100,
y=100*y_unit, my_arg2=10)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg1, int)
assert isinstance(y, u.Quantity)
assert isinstance(my_arg2, int)
assert y.unit == y_unit
assert my_arg2 == 10
def test_kwarg_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(u.UnitsError) as e:
x, y = myfunc_args(1*x_unit, y=100*u.Joule)
str_to = str(y_target)
assert str(e.value) == f"Argument 'y' to function 'myfunc_args' must be in units convertible to '{str_to}'."
def test_kwarg_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, y=100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You should pass in an astropy Quantity instead."
def test_kwarg_default(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_input(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x=1*x_unit, y=1*y_unit):
return x, y
kwargs = {'x': 10*x_unit, 'y': 10*y_unit}
x, y = myfunc_args(**kwargs)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_extra(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, **kwargs):
return x
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
# ---- Tests that don't use the fixtures ----
@pytest.mark.parametrize("x_unit,y_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_arg_equivalencies(x_unit, y_unit):
@u.quantity_input(x=x_unit, y=y_unit,
equivalencies=u.mass_energy())
def myfunc_args(x, y):
return x, y+(10*u.J) # Add an energy to check equiv is working
x, y = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == u.arcsec
assert y.unit == u.gram
@pytest.mark.parametrize("x_unit,energy_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_kwarg_equivalencies(x_unit, energy_unit):
@u.quantity_input(x=x_unit, energy=energy_unit, equivalencies=u.mass_energy())
def myfunc_args(x, energy=10*u.eV):
return x, energy+(10*u.J) # Add an energy to check equiv is working
x, energy = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(energy, u.Quantity)
assert x.unit == u.arcsec
assert energy.unit == u.gram
def test_no_equivalent():
class test_unit:
pass
class test_quantity:
unit = test_unit()
@u.quantity_input(x=u.arcsec)
def myfunc_args(x):
return x
with pytest.raises(TypeError) as e:
x, y = myfunc_args(test_quantity())
assert str(e.value) == "Argument 'x' to function 'myfunc_args' has a 'unit' attribute without an 'is_equivalent' method. You should pass in an astropy Quantity instead."
def test_kwarg_invalid_physical_type():
@u.quantity_input(x='angle', y='africanswallow')
def myfunc_args(x, y=10*u.deg):
return x, y
with pytest.raises(ValueError) as e:
x, y = myfunc_args(1*u.arcsec, y=100*u.deg)
assert str(e.value) == "Invalid unit or physical type 'africanswallow'."
def test_default_value_check():
x_target = u.deg
x_unit = u.arcsec
with pytest.raises(TypeError):
@u.quantity_input(x=x_target)
def myfunc_args(x=1.):
return x
x = myfunc_args()
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
def test_str_unit_typo():
@u.quantity_input
def myfunc_args(x: "kilograam"):
return x
with pytest.raises(ValueError):
result = myfunc_args(u.kg)
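# Illustrative sketch (not part of the original suite): quantity_input also
# honors a return annotation, converting the output to the annotated unit.
# The helper names below are hypothetical.
def _example_return_annotation():
    @u.quantity_input
    def to_arcsec(angle: u.deg) -> u.arcsec:
        return angle
    # 1 deg should come back converted to 3600 arcsec
    assert to_arcsec(1 * u.deg).unit == u.arcsec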
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
class TestTypeAnnotations:
@pytest.mark.parametrize("annot",
[u.m, u.Quantity[u.m], u.Quantity[u.m, "more"]]
if HAS_ANNOTATED else [None]) # Note: parametrization is done even if test class is skipped
def test_single_annotation_unit(self, annot):
"""Try a variety of valid annotations."""
@u.quantity_input
def myfunc_args(x: annot, y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = myfunc_args(i_q, i_str)
assert o_q == i_q
assert o_str == i_str
def test_args_None():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
y_unit = u.kpc
@u.quantity_input(x=[x_target, None], y=[None, y_target])
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(None, 1*y_unit)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
assert x is None
def test_args_None_kwarg():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=None):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
with pytest.raises(TypeError):
x, y = myfunc_args(None, None)
@pytest.mark.parametrize('val', [1., 1, np.arange(10), np.arange(10.)])
def test_allow_dimensionless_numeric(val):
"""
When dimensionless_unscaled is an allowed unit, numbers and numeric numpy
arrays are allowed through
"""
@u.quantity_input(velocity=[u.km/u.s, u.dimensionless_unscaled])
def myfunc(velocity):
return velocity
assert np.all(myfunc(val) == val)
@pytest.mark.parametrize('val', [1., 1, np.arange(10), np.arange(10.)])
def test_allow_dimensionless_numeric_strict(val):
"""
When dimensionless_unscaled is an allowed unit, but we are being strict,
don't allow numbers and numeric numpy arrays through
"""
@u.quantity_input(velocity=[u.km/u.s, u.dimensionless_unscaled],
strict_dimensionless=True)
def myfunc(velocity):
return velocity
with pytest.raises(TypeError):
assert myfunc(val)
@pytest.mark.parametrize('val', [1*u.deg, [1, 2, 3]*u.m])
def test_dimensionless_with_nondimensionless_input(val):
"""
When dimensionless_unscaled is the only allowed unit, don't let input with
non-dimensionless units through
"""
@u.quantity_input(x=u.dimensionless_unscaled)
def myfunc(x):
return x
with pytest.raises(u.UnitsError):
myfunc(val)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
def test_annotated_not_quantity():
"""Test when annotation looks like a Quantity[X], but isn't."""
@u.quantity_input()
def myfunc(x: typing.Annotated[object, u.m]):
return x
# nothing happens when wrong unit is passed
assert myfunc(1) == 1
assert myfunc(1 * u.m) == 1 * u.m
assert myfunc(1 * u.s) == 1 * u.s
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
def test_annotated_not_unit():
"""Test when annotation looks like a Quantity[X], but the unit's wrong."""
@u.quantity_input()
def myfunc(x: typing.Annotated[u.Quantity, object()]):
return x
# nothing happens when wrong unit is passed
assert myfunc(1) == 1
assert myfunc(1 * u.m) == 1 * u.m
assert myfunc(1 * u.s) == 1 * u.s
|
ad3f463626ca51a891ec9e05504e9fb0ce700f6d26a56e26e118dc1f729c5f1f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numbers
import numpy as np
from astropy.units import (
CompositeUnit, Unit, UnitConversionError, UnitsError, UnitTypeError, dimensionless_unscaled,
photometric)
from .core import FunctionQuantity, FunctionUnitBase
from .units import dB, dex, mag
__all__ = ['LogUnit', 'MagUnit', 'DexUnit', 'DecibelUnit',
'LogQuantity', 'Magnitude', 'Decibel', 'Dex',
'STmag', 'ABmag', 'M_bol', 'm_bol']
class LogUnit(FunctionUnitBase):
"""Logarithmic unit containing a physical one
Usually, logarithmic units are instantiated via specific subclasses
    such as `MagUnit`, `DecibelUnit`, and `DexUnit`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the logarithmic function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the logarithmic unit set by the subclass.
"""
# the four essential overrides of FunctionUnitBase
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return LogQuantity
def from_physical(self, x):
"""Transformation from value in physical to value in logarithmic units.
Used in equivalency."""
return dex.to(self._function_unit, np.log10(x))
def to_physical(self, x):
"""Transformation from value in logarithmic to value in physical units.
Used in equivalency."""
return 10 ** self._function_unit.to(dex, x)
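    # Round-trip sketch (illustrative): for a plain dex function unit,
    # from_physical(100.) should give 2.0 and to_physical(2.) should give 100.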
# ^^^^ the four essential overrides of FunctionUnitBase
# add addition and subtraction, which imply multiplication/division of
# the underlying physical units
def _add_and_adjust_physical_unit(self, other, sign_self, sign_other):
"""Add/subtract LogUnit to/from another unit, and adjust physical unit.
self and other are multiplied by sign_self and sign_other, resp.
We wish to do: ±lu_1 + ±lu_2 -> lu_f (lu=logarithmic unit)
and pu_1^(±1) * pu_2^(±1) -> pu_f (pu=physical unit)
Raises
------
UnitsError
If function units are not equivalent.
"""
# First, insist on compatible logarithmic type. Here, plain u.mag,
# u.dex, and u.dB are OK, i.e., other does not have to be LogUnit
# (this will indirectly test whether other is a unit at all).
try:
getattr(other, 'function_unit', other)._to(self._function_unit)
except AttributeError:
# if other is not a unit (i.e., does not have _to).
return NotImplemented
except UnitsError:
raise UnitsError("Can only add/subtract logarithmic units of"
"of compatible type.")
other_physical_unit = getattr(other, 'physical_unit',
dimensionless_unscaled)
physical_unit = CompositeUnit(
1, [self._physical_unit, other_physical_unit],
[sign_self, sign_other])
return self._copy(physical_unit)
def __neg__(self):
return self._copy(self.physical_unit**(-1))
def __add__(self, other):
# Only know how to add to a logarithmic unit with compatible type,
# be it a plain one (u.mag, etc.,) or another LogUnit
return self._add_and_adjust_physical_unit(other, +1, +1)
def __radd__(self, other):
return self._add_and_adjust_physical_unit(other, +1, +1)
def __sub__(self, other):
return self._add_and_adjust_physical_unit(other, +1, -1)
def __rsub__(self, other):
        # In normal usage, other cannot be a LogUnit; only the equivalent
        # plain units u.mag, u.dB, u.dex can appear here. But we might as
        # well use the common routine.
return self._add_and_adjust_physical_unit(other, -1, +1)
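    # Sketch of the arithmetic above (illustrative; results assumed):
    #   u.mag(u.Jy) + u.mag(u.s)  should give  mag(Jy s)   (physical units multiply)
    #   u.mag(u.Jy) - u.mag(u.s)  should give  mag(Jy / s) (physical units divide)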
class MagUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``mag``, but this allows one to use an equivalent
unit such as ``2 mag``.
"""
@property
def _default_function_unit(self):
return mag
@property
def _quantity_class(self):
return Magnitude
class DexUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dex``, but this allows one to use an equivalent
unit such as ``0.5 dex``.
"""
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return Dex
def to_string(self, format='generic'):
if format == 'cds':
if self.physical_unit == dimensionless_unscaled:
return "[-]" # by default, would get "[---]".
else:
return f"[{self.physical_unit.to_string(format=format)}]"
else:
            return super().to_string(format)
class DecibelUnit(LogUnit):
"""Logarithmic physical units expressed in dB
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the decibel function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dB``, but this allows one to use an equivalent
unit such as ``2 dB``.
"""
@property
def _default_function_unit(self):
return dB
@property
def _quantity_class(self):
return Decibel
class LogQuantity(FunctionQuantity):
"""A representation of a (scaled) logarithm of a number with a unit
Parameters
----------
value : number, `~astropy.units.Quantity`, `~astropy.units.function.logarithmic.LogQuantity`, or sequence of quantity-like.
The numerical value of the logarithmic quantity. If a number or
a `~astropy.units.Quantity` with a logarithmic unit, it will be
converted to ``unit`` and the physical unit will be inferred from
``unit``. If a `~astropy.units.Quantity` with just a physical unit,
        it will be converted to the logarithmic unit, after, if necessary,
converting it to the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.function.FunctionUnitBase`, optional
For an `~astropy.units.function.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The ``dtype`` of the resulting Numpy array or scalar that will
        hold the value. If not provided, it is determined automatically
from the input value.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
Examples
--------
Typically, use is made of an `~astropy.units.function.FunctionQuantity`
subclass, as in::
>>> import astropy.units as u
>>> u.Magnitude(-2.5)
<Magnitude -2.5 mag>
>>> u.Magnitude(10.*u.count/u.second)
<Magnitude -2.5 mag(ct / s)>
>>> u.Decibel(1.*u.W, u.DecibelUnit(u.mW)) # doctest: +FLOAT_CMP
<Decibel 30. dB(mW)>
"""
# only override of FunctionQuantity
_unit_class = LogUnit
# additions that work just for logarithmic units
def __add__(self, other):
# Add function units, thus multiplying physical units. If no unit is
# given, assume dimensionless_unscaled; this will give the appropriate
# exception in LogUnit.__add__.
new_unit = self.unit + getattr(other, 'unit', dimensionless_unscaled)
# Add actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view + getattr(other, '_function_view', other)
return self._new_view(result, new_unit)
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
new_unit = self.unit + getattr(other, 'unit', dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view += getattr(other, '_function_view', other)
self._set_unit(new_unit)
return self
def __sub__(self, other):
# Subtract function units, thus dividing physical units.
new_unit = self.unit - getattr(other, 'unit', dimensionless_unscaled)
# Subtract actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view - getattr(other, '_function_view', other)
return self._new_view(result, new_unit)
def __rsub__(self, other):
new_unit = self.unit.__rsub__(
getattr(other, 'unit', dimensionless_unscaled))
result = self._function_view.__rsub__(
getattr(other, '_function_view', other))
# Ensure the result is in right function unit scale
# (with rsub, this does not have to be one's own).
result = result.to(new_unit.function_unit)
return self._new_view(result, new_unit)
def __isub__(self, other):
new_unit = self.unit - getattr(other, 'unit', dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view -= getattr(other, '_function_view', other)
self._set_unit(new_unit)
return self
def __mul__(self, other):
# Multiply by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
# Multiplying a log means putting the factor into the exponent
# of the unit
new_physical_unit = self.unit.physical_unit**other
result = self.view(np.ndarray) * other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__mul__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit**other
function_view = self._function_view
function_view *= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__imul__(other)
def __truediv__(self, other):
# Divide by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
            # Dividing a log means raising the physical unit to the
            # reciprocal power of the divisor
new_physical_unit = self.unit.physical_unit**(1/other)
result = self.view(np.ndarray) / other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__truediv__(other)
def __itruediv__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit**(1/other)
function_view = self._function_view
function_view /= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__itruediv__(other)
def __pow__(self, other):
# We check if this power is OK by applying it first to the unit.
try:
other = float(other)
except TypeError:
return NotImplemented
new_unit = self.unit ** other
new_value = self.view(np.ndarray) ** other
return self._new_view(new_value, new_unit)
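    # Scaling sketch (illustrative): multiplying a log quantity by 2 doubles
    # its value and squares the physical unit, so 2 * u.Magnitude(10 * u.Jy)
    # should have unit mag(Jy**2); division uses the reciprocal power.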
def __ilshift__(self, other):
try:
other = Unit(other)
except UnitTypeError:
return NotImplemented
if not isinstance(other, self._unit_class):
return NotImplemented
try:
factor = self.unit.physical_unit._to(other.physical_unit)
except UnitConversionError:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] += self.unit.from_physical(factor)
self._set_unit(other)
return self
# Methods that do not work for function units generally but are OK for
# logarithmic units as they imply differences and independence of
# physical unit.
def var(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof,
unit=self.unit.function_unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof,
unit=self.unit._copy(dimensionless_unscaled))
def ptp(self, axis=None, out=None):
return self._wrap_function(np.ptp, axis, out=out,
unit=self.unit._copy(dimensionless_unscaled))
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis,
unit=self.unit._copy(dimensionless_unscaled))
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin,
unit=self.unit._copy(dimensionless_unscaled))
_supported_functions = (FunctionQuantity._supported_functions |
set(getattr(np, function) for function in
('var', 'std', 'ptp', 'diff', 'ediff1d')))
class Dex(LogQuantity):
_unit_class = DexUnit
class Decibel(LogQuantity):
_unit_class = DecibelUnit
class Magnitude(LogQuantity):
_unit_class = MagUnit
dex._function_unit_class = DexUnit
dB._function_unit_class = DecibelUnit
mag._function_unit_class = MagUnit
STmag = MagUnit(photometric.STflux)
STmag.__doc__ = "ST magnitude: STmag=-21.1 corresponds to 1 erg/s/cm2/A"
ABmag = MagUnit(photometric.ABflux)
ABmag.__doc__ = "AB magnitude: ABmag=-48.6 corresponds to 1 erg/s/cm2/Hz"
M_bol = MagUnit(photometric.Bol)
M_bol.__doc__ = ("Absolute bolometric magnitude: M_bol=0 corresponds to "
"L_bol0={}".format(photometric.Bol.si))
m_bol = MagUnit(photometric.bol)
m_bol.__doc__ = ("Apparent bolometric magnitude: m_bol=0 corresponds to "
"f_bol0={}".format(photometric.bol.si))
|
223652b6164b7cef14c60fd362aeca90f611b33f2e0bc550e1c3160166ab5c84 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from astropy.units import (
Quantity, Unit, UnitBase, UnitConversionError, UnitsError, UnitTypeError,
dimensionless_unscaled)
__all__ = ['FunctionUnitBase', 'FunctionQuantity']
SUPPORTED_UFUNCS = set(getattr(np.core.umath, ufunc) for ufunc in (
'isfinite', 'isinf', 'isnan', 'sign', 'signbit',
'rint', 'floor', 'ceil', 'trunc',
'_ones_like', 'ones_like', 'positive') if hasattr(np.core.umath, ufunc))
# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted
SUPPORTED_FUNCTIONS = set(getattr(np, function) for function in
('clip', 'trace', 'mean', 'min', 'max', 'round'))
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
"""Abstract base class for function units.
Function units are functions containing a physical unit, such as dB(mW).
Most of the arithmetic operations on function units are defined in this
base class.
While instantiation is defined, this class should not be used directly.
Rather, subclasses should be used that override the abstract properties
`_default_function_unit` and `_quantity_class`, and the abstract methods
`from_physical`, and `to_physical`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the function unit set by the subclass.
"""
# ↓↓↓ the following four need to be set by subclasses
# Make this a property so we can ensure subclasses define it.
@property
@abstractmethod
def _default_function_unit(self):
"""Default function unit corresponding to the function.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.mag`.
"""
# This has to be a property because the function quantity will not be
# known at unit definition time, as it gets defined after.
@property
@abstractmethod
def _quantity_class(self):
"""Function quantity class corresponding to this function unit.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.Magnitude`.
"""
@abstractmethod
def from_physical(self, x):
"""Transformation from value in physical to value in function units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
@abstractmethod
def to_physical(self, x):
"""Transformation from value in function to value in physical units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
# ↑↑↑ the above four need to be set by subclasses
# have priority over arrays, regular units, and regular quantities
__array_priority__ = 30000
def __init__(self, physical_unit=None, function_unit=None):
if physical_unit is None:
self._physical_unit = dimensionless_unscaled
else:
self._physical_unit = Unit(physical_unit)
if (not isinstance(self._physical_unit, UnitBase) or
self._physical_unit.is_equivalent(
self._default_function_unit)):
raise UnitConversionError("Unit {} is not a physical unit."
.format(self._physical_unit))
if function_unit is None:
self._function_unit = self._default_function_unit
else:
# any function unit should be equivalent to subclass default
function_unit = Unit(getattr(function_unit, 'function_unit',
function_unit))
if function_unit.is_equivalent(self._default_function_unit):
self._function_unit = function_unit
else:
raise UnitConversionError(
"Cannot initialize '{}' instance with function unit '{}'"
", as it is not equivalent to default function unit '{}'."
.format(self.__class__.__name__, function_unit,
self._default_function_unit))
def _copy(self, physical_unit=None):
"""Copy oneself, possibly with a different physical unit."""
if physical_unit is None:
physical_unit = self.physical_unit
return self.__class__(physical_unit, self.function_unit)
@property
def physical_unit(self):
return self._physical_unit
@property
def function_unit(self):
return self._function_unit
@property
def equivalencies(self):
"""List of equivalencies between function and physical units.
Uses the `from_physical` and `to_physical` methods.
"""
return [(self, self.physical_unit,
self.to_physical, self.from_physical)]
# ↓↓↓ properties/methods required to behave like a unit
def decompose(self, bases=set()):
"""Copy the current unit with the physical unit decomposed.
For details, see `~astropy.units.UnitBase.decompose`.
"""
return self._copy(self.physical_unit.decompose(bases))
@property
def si(self):
"""Copy the current function unit with the physical unit in SI."""
return self._copy(self.physical_unit.si)
@property
def cgs(self):
"""Copy the current function unit with the physical unit in CGS."""
return self._copy(self.physical_unit.cgs)
def _get_physical_type_id(self):
"""Get physical type corresponding to physical unit."""
return self.physical_unit._get_physical_type_id()
@property
def physical_type(self):
"""Return the physical type of the physical unit (e.g., 'length')."""
return self.physical_unit.physical_type
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, string, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to the built-in equivalencies between the
function unit and the physical one, as well as possible global
defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
Use `None` to turn off any global equivalencies.
Returns
-------
bool
"""
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other_physical_unit = getattr(other, 'physical_unit', (
dimensionless_unscaled if self.function_unit.is_equivalent(other)
else other))
return self.physical_unit.is_equivalent(other_physical_unit,
equivalencies)
def to(self, other, value=1., equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : `~astropy.units.Unit`, `~astropy.units.function.FunctionUnitBase`, or str
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the specified unit.
If not provided, defaults to 1.0.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
            This list is meant to treat only equivalencies between different
physical units; the built-in equivalency between the function
unit and the physical one is automatically taken into account.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
"""
# conversion to one's own physical unit should be fastest
if other is self.physical_unit:
return self.to_physical(value)
other_function_unit = getattr(other, 'function_unit', other)
if self.function_unit.is_equivalent(other_function_unit):
# when other is an equivalent function unit:
# first convert physical units to other's physical units
other_physical_unit = getattr(other, 'physical_unit',
dimensionless_unscaled)
if self.physical_unit != other_physical_unit:
value_other_physical = self.physical_unit.to(
other_physical_unit, self.to_physical(value),
equivalencies)
# make function unit again, in own system
value = self.from_physical(value_other_physical)
# convert possible difference in function unit (e.g., dex->dB)
return self.function_unit.to(other_function_unit, value)
else:
try:
# when other is not a function unit
return self.physical_unit.to(other, self.to_physical(value),
equivalencies)
except UnitConversionError as e:
if self.function_unit == Unit('mag'):
# One can get to raw magnitudes via math that strips the dimensions off.
# Include extra information in the exception to remind users of this.
msg = "Did you perhaps subtract magnitudes so the unit got lost?"
e.args += (msg,)
raise e
else:
raise
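    # Conversion sketch (illustrative; numbers assumed): 30 dB(mW) is
    # 10**(30/10) mW = 1 W, so DecibelUnit(u.mW).to(u.W, 30.) (with
    # DecibelUnit from .logarithmic) should return 1.0.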
def is_unity(self):
return False
def __eq__(self, other):
return (self.physical_unit == getattr(other, 'physical_unit',
dimensionless_unscaled) and
self.function_unit == getattr(other, 'function_unit', other))
def __ne__(self, other):
return not self.__eq__(other)
def __rlshift__(self, other):
"""Unit conversion operator ``<<``"""
try:
return self._quantity_class(other, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __mul__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit * other
else:
raise UnitsError("Cannot multiply a function unit "
"with a physical dimension with any unit.")
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit / other
else:
raise UnitsError("Cannot divide a function unit "
"with a physical dimension by any unit.")
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(1./other, unit=self)
except Exception:
return NotImplemented
def __rtruediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return other / self.function_unit
else:
raise UnitsError("Cannot divide a function unit "
"with a physical dimension into any unit")
else:
# Don't know what to do with anything not like a unit.
return NotImplemented
def __pow__(self, power):
if power == 0:
return dimensionless_unscaled
elif power == 1:
return self._copy()
if self.physical_unit == dimensionless_unscaled:
return self.function_unit ** power
raise UnitsError("Cannot raise a function unit "
"with a physical dimension to any power but 0 or 1.")
def __pos__(self):
return self._copy()
def to_string(self, format='generic'):
"""
Output the unit in the given format as a string.
The physical unit is appended, within parentheses, to the function
unit, as in "dB(mW)", with both units set using the given format
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
if format not in ('generic', 'unscaled', 'latex', 'latex_inline'):
raise ValueError(f"Function units cannot be written in {format} "
"format. Only 'generic', 'unscaled', 'latex' and "
"'latex_inline' are supported.")
self_str = self.function_unit.to_string(format)
pu_str = self.physical_unit.to_string(format)
if pu_str == '':
pu_str = '1'
if format.startswith('latex'):
self_str += r'$\mathrm{{\left( {0} \right)}}$'.format(
pu_str[1:-1]) # need to strip leading and trailing "$"
else:
self_str += f'({pu_str})'
return self_str
def __str__(self):
"""Return string representation for unit."""
self_str = str(self.function_unit)
pu_str = str(self.physical_unit)
if pu_str:
self_str += f'({pu_str})'
return self_str
def __repr__(self):
# By default, try to give a representation using `Unit(<string>)`,
# with string such that parsing it would give the correct FunctionUnit.
if callable(self.function_unit):
return f'Unit("{self.to_string()}")'
else:
return '{}("{}"{})'.format(
self.__class__.__name__, self.physical_unit,
"" if self.function_unit is self._default_function_unit
else f', unit="{self.function_unit}"')
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return self.to_string('latex')
def __hash__(self):
return hash((self.function_unit, self.physical_unit))
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, quantity-like, or sequence thereof
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
If a `~astropy.units.Quantity` with just a physical unit, it will
        be converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.function.FunctionUnitBase`, optional
For an `~astropy.units.function.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.function.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
"""
_unit_class = None
"""Default `~astropy.units.function.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# Convert possible string input to a (function) unit.
unit = Unit(unit)
if not isinstance(unit, FunctionUnitBase):
# By default, use value's physical unit.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# if iterable, see if first item has a unit
# (mixed lists fail in super call below).
try:
value_unit = getattr(value[0], 'unit')
except Exception:
pass
physical_unit = getattr(value_unit, 'physical_unit', value_unit)
unit = cls._unit_class(physical_unit, function_unit=unit)
# initialise!
return super().__new__(cls, value, unit, dtype=dtype, copy=copy,
order=order, subok=subok, ndmin=ndmin)
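    # Construction sketch (illustrative): Magnitude(-2.5, u.mag(u.Jy)) and
    # Magnitude(10. * u.Jy) should come out equal -- the physical unit is
    # taken from ``unit`` when it is a function unit, else inferred from
    # ``value``.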
# ↓↓↓ properties not found in Quantity
@property
def physical(self):
"""The physical quantity corresponding the function one."""
return self.to(self.unit.physical_unit)
@property
def _function_view(self):
"""View as Quantity with function unit, dropping the physical unit.
Use `~astropy.units.quantity.Quantity.value` for just the value.
"""
return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behavior
@property
def si(self):
"""Return a copy with the physical unit in SI units."""
return self.__class__(self.physical.si)
@property
def cgs(self):
"""Return a copy with the physical unit in CGS units."""
return self.__class__(self.physical.cgs)
def decompose(self, bases=[]):
"""Generate a new `FunctionQuantity` with the physical unit decomposed.
For details, see `~astropy.units.Quantity.decompose`.
"""
return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behavior
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if not isinstance(unit, self._unit_class):
# Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
try:
# "or 'nonsense'" ensures `None` breaks, just in case.
unit = self._unit_class(function_unit=unit or 'nonsense')
except Exception:
raise UnitTypeError(
"{} instances require {} function units"
.format(type(self).__name__, self._unit_class.__name__) +
f", so cannot set it to '{unit}'.")
self._unit = unit
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# TODO: it would be more logical to have this in Quantity already,
# instead of in UFUNC_HELPERS, where it cannot be overridden.
# And really it should just return NotImplemented, since possibly
# another argument might know what to do.
if function not in self._supported_ufuncs:
raise UnitTypeError(
f"Cannot use ufunc '{function.__name__}' with function quantities")
return super().__array_ufunc__(function, method, *inputs, **kwargs)
# ↓↓↓ methods overridden to change behavior
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view * other
raise UnitTypeError("Cannot multiply function quantities which "
"are not dimensionless with anything.")
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view / other
raise UnitTypeError("Cannot divide function quantities which "
"are not dimensionless by anything.")
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view.__rtruediv__(other)
raise UnitTypeError("Cannot divide function quantities which "
"are not dimensionless into anything.")
def _comparison(self, other, comparison_func):
"""Do a comparison between self and other, raising UnitsError when
other cannot be converted to self because it has different physical
unit, and returning NotImplemented when there are other errors."""
try:
# will raise a UnitsError if physical units not equivalent
other_in_own_unit = self._to_own_unit(other, check_precision=False)
except UnitsError as exc:
if self.unit.physical_unit != dimensionless_unscaled:
raise exc
try:
other_in_own_unit = self._function_view._to_own_unit(
other, check_precision=False)
except Exception:
raise exc
except Exception:
return NotImplemented
return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
def __lshift__(self, other):
"""Unit conversion operator `<<`"""
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, 'unit') and
hasattr(arg.unit, 'physical_unit'))):
args = tuple(getattr(arg, '_function_view', arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError("Cannot use method that uses function '{}' with "
"function quantities that are not dimensionless."
.format(function.__name__))
# Override functions that are supported but do not use _wrap_function
# in Quantity.
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out,
keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(np.clip, self._to_own_unit(a_min),
self._to_own_unit(a_max), out=out)
|
6187c6e52a9c4e2461f001932dc8e1f21e92b9b18c90546ed0f6ed7e7bf5d64f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units that can also be used as functions of other units.
If called, their arguments are used to initialize the corresponding function
unit (e.g., ``u.mag(u.ct/u.s)``). Note that the prefixed versions cannot be
called, as it would be unclear what, e.g., ``u.mmag(u.ct/u.s)`` would mean.
"""
from astropy.units.core import _add_prefixes
from .mixin import IrreducibleFunctionUnit, RegularFunctionUnit
_ns = globals()
###########################################################################
# Logarithmic units
# These calls are what core.def_unit would do, but we need to use the callable
# unit versions. The actual function unit classes get added in logarithmic.
dex = IrreducibleFunctionUnit(['dex'], namespace=_ns,
doc="Dex: Base 10 logarithmic unit")
dB = RegularFunctionUnit(['dB', 'decibel'], 0.1 * dex, namespace=_ns,
doc="Decibel: ten per base 10 logarithmic unit")
mag = RegularFunctionUnit(['mag'], -0.4 * dex, namespace=_ns,
doc=("Astronomical magnitude: "
"-2.5 per base 10 logarithmic unit"))
_add_prefixes(mag, namespace=_ns, prefixes=True)
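# Usage sketch (illustrative): calling a function unit wraps a physical one,
# e.g. ``mag(u.ct / u.s)`` should yield a MagUnit with physical unit ct / s;
# the prefixed versions added above (e.g. ``mmag``) are plain scaled units
# and are not callable.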
###########################################################################
# CLEANUP
del RegularFunctionUnit
del IrreducibleFunctionUnit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from astropy.units.utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
53c0ce3a3bb6ef866353692738556da458825d4054bc2e197aaa6eb06bded209 | import os
import pytest
import numpy as np
from urllib.error import HTTPError, URLError
from astropy.time import Time
from astropy import units as u
from astropy.constants import c
from astropy.coordinates.builtin_frames import GCRS, TETE
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.coordinates.representation import CartesianRepresentation, UnitSphericalRepresentation
from astropy.coordinates.solar_system import (get_body, get_moon, BODY_NAME_TO_KERNEL_SPEC,
_get_apparent_body_position, solar_system_ephemeris,
get_body_barycentric, get_body_barycentric_posvel)
from astropy.coordinates.funcs import get_sun
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
from astropy.utils.data import download_file, get_pkg_data_filename
from astropy.utils.compat.optional_deps import (HAS_JPLEPHEM, # noqa
HAS_SKYFIELD)
if HAS_SKYFIELD:
from skyfield.api import Loader, Topos
de432s_separation_tolerance_planets = 5*u.arcsec
de432s_separation_tolerance_moon = 5*u.arcsec
de432s_distance_tolerance = 20*u.km
skyfield_angular_separation_tolerance = 1*u.arcsec
skyfield_separation_tolerance = 10*u.km
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_SKYFIELD')
def test_positions_skyfield(tmpdir):
"""
Test positions against those generated by skyfield.
"""
load = Loader(tmpdir)
t = Time('1980-03-25 00:00')
location = None
# skyfield ephemeris
try:
planets = load('de421.bsp')
ts = load.timescale()
except OSError as e:
if os.environ.get('CI', False) and 'timed out' in str(e):
pytest.xfail('Timed out in CI')
else:
raise
mercury, jupiter, moon = planets['mercury'], planets['jupiter barycenter'], planets['moon']
earth = planets['earth']
skyfield_t = ts.from_astropy(t)
if location is not None:
earth = earth+Topos(latitude_degrees=location.lat.to_value(u.deg),
longitude_degrees=location.lon.to_value(u.deg),
elevation_m=location.height.to_value(u.m))
skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent()
skyfield_jupiter = earth.at(skyfield_t).observe(jupiter).apparent()
skyfield_moon = earth.at(skyfield_t).observe(moon).apparent()
if location is not None:
frame = TETE(obstime=t, location=location)
else:
frame = TETE(obstime=t)
ra, dec, dist = skyfield_mercury.radec(epoch='date')
skyfield_mercury = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
ra, dec, dist = skyfield_jupiter.radec(epoch='date')
skyfield_jupiter = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
ra, dec, dist = skyfield_moon.radec(epoch='date')
skyfield_moon = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
# planet positions w.r.t true equator and equinox
moon_astropy = get_moon(t, location, ephemeris='de430').transform_to(frame)
mercury_astropy = get_body('mercury', t, location, ephemeris='de430').transform_to(frame)
jupiter_astropy = get_body('jupiter', t, location, ephemeris='de430').transform_to(frame)
assert (moon_astropy.separation(skyfield_moon) <
skyfield_angular_separation_tolerance)
assert (moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance)
assert (jupiter_astropy.separation(skyfield_jupiter) <
skyfield_angular_separation_tolerance)
assert (jupiter_astropy.separation_3d(skyfield_jupiter) <
skyfield_separation_tolerance)
assert (mercury_astropy.separation(skyfield_mercury) <
skyfield_angular_separation_tolerance)
assert (mercury_astropy.separation_3d(skyfield_mercury) <
skyfield_separation_tolerance)
planets.close()
class TestPositionsGeocentric:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup(self):
self.t = Time('1980-03-25 00:00')
self.apparent_frame = TETE(obstime=self.t)
# Results returned by JPL Horizons web interface
self.horizons = {
'mercury': SkyCoord(ra='22h41m47.78s', dec='-08d29m32.0s',
distance=c*6.323037*u.min, frame=self.apparent_frame),
'moon': SkyCoord(ra='07h32m02.62s', dec='+18d34m05.0s',
distance=c*0.021921*u.min, frame=self.apparent_frame),
'jupiter': SkyCoord(ra='10h17m12.82s', dec='+12d02m57.0s',
distance=c*37.694557*u.min, frame=self.apparent_frame),
'sun': SkyCoord(ra='00h16m31.00s', dec='+01d47m16.9s',
distance=c*8.294858*u.min, frame=self.apparent_frame)}
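        # The distances above are given as light-travel times: multiplying
        # the speed of light ``c`` by a time Quantity yields a length, e.g.
        # (c * 1 * u.min).to(u.au) should be about 0.12 au.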
@pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),
(('mercury', 7.*u.arcsec, 1000*u.km),
('jupiter', 78.*u.arcsec, 76000*u.km),
('moon', 20.*u.arcsec, 80*u.km),
('sun', 5.*u.arcsec, 11.*u.km)))
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and
Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon.
"""
astropy = get_body(body, self.t, ephemeris='builtin')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('body', ('mercury', 'jupiter', 'sun'))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris='de432s')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_planets)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris='de432s')
horizons = self.horizons['moon']
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_moon)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
class TestPositionKittPeak:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup(self):
kitt_peak = EarthLocation.from_geodetic(lon=-111.6*u.deg,
lat=31.963333333333342*u.deg,
height=2120*u.m)
self.t = Time('2014-09-25T00:00', location=kitt_peak)
self.apparent_frame = TETE(obstime=self.t, location=kitt_peak)
# Results returned by JPL Horizons web interface
self.horizons = {
'mercury': SkyCoord(ra='13h38m58.50s', dec='-13d34m42.6s',
distance=c*7.699020*u.min, frame=self.apparent_frame),
'moon': SkyCoord(ra='12h33m12.85s', dec='-05d17m54.4s',
distance=c*0.022054*u.min, frame=self.apparent_frame),
'jupiter': SkyCoord(ra='09h09m55.55s', dec='+16d51m57.8s',
distance=c*49.244937*u.min, frame=self.apparent_frame)}
@pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),
(('mercury', 7.*u.arcsec, 500*u.km),
('jupiter', 78.*u.arcsec, 82000*u.km)))
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c.
"""
# Add uncertainty in position of Earth
dist_tol = dist_tol + 1300 * u.km
astropy = get_body(body, self.t, ephemeris='builtin')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('body', ('mercury', 'jupiter'))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris='de432s')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_planets)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris='de432s')
horizons = self.horizons['moon']
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_moon)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('bodyname', ('mercury', 'jupiter'))
def test_custom_kernel_spec_body(self, bodyname):
"""
Checks that giving a kernel specifier instead of a body name works
"""
coord_by_name = get_body(bodyname, self.t, ephemeris='de432s')
kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname]
coord_by_kspec = get_body(kspec, self.t, ephemeris='de432s')
assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra)
assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec)
assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_horizons_consistency_with_precision():
"""
    A test to compare at high precision against the output of JPL Horizons.
    Tests ephemerides and the conversions from ICRS to GCRS to TETE. We are
    aiming for better than 2 milliarcsecond precision.
    We use the Moon since it is nearby and moves fast in the sky, so we are
    testing parallax and the proper handling of light deflection and aberration.
"""
    # JPL Horizons values for 2020-04-06 00:00 to 23:00 in 1 hour steps.
    # JPL Horizons has a known offset (frame bias) of 51.02 mas in RA; we correct for that here.
ra_apparent_horizons = [
170.167332531, 170.560688674, 170.923834838, 171.271663481, 171.620188972, 171.985340827,
172.381766539, 172.821772139, 173.314502650, 173.865422398, 174.476108551, 175.144332386,
175.864375310, 176.627519827, 177.422655853, 178.236955730, 179.056584831, 179.867427392,
180.655815385, 181.409252074, 182.117113814, 182.771311578, 183.366872837, 183.902395443
] * u.deg + 51.02376467 * u.mas
dec_apparent_horizons = [
10.269112037, 10.058820647, 9.837152044, 9.603724551, 9.358956528, 9.104012390, 8.840674927,
8.571162442, 8.297917326, 8.023394488, 7.749873882, 7.479312991, 7.213246666, 6.952732614,
6.698336823, 6.450150213, 6.207828142, 5.970645962, 5.737565957, 5.507313851, 5.278462034,
5.049521497, 4.819038911, 4.585696512
] * u.deg
with solar_system_ephemeris.set('de430'):
loc = EarthLocation.from_geodetic(-67.787260*u.deg, -22.959748*u.deg, 5186*u.m)
times = Time('2020-04-06 00:00') + np.arange(0, 24, 1)*u.hour
astropy = get_body('moon', times, loc)
apparent_frame = TETE(obstime=times, location=loc)
astropy = astropy.transform_to(apparent_frame)
usrepr = UnitSphericalRepresentation(ra_apparent_horizons, dec_apparent_horizons)
horizons = apparent_frame.realize_frame(usrepr)
assert_quantity_allclose(astropy.separation(horizons), 0*u.mas, atol=1.5*u.mas)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),
Time('1980-03-25 00:00'),
Time('2010-10-13 00:00')))
def test_get_sun_consistency(time):
"""
Test that the sun from JPL and the builtin get_sun match
"""
sun_jpl_gcrs = get_body('sun', time, ephemeris='de432s')
builtin_get_sun = get_sun(time)
sep = builtin_get_sun.separation(sun_jpl_gcrs)
assert sep < 0.1*u.arcsec
def test_get_moon_nonscalar_regression():
"""
Test that the builtin ephemeris works with non-scalar times.
See Issue #5069.
"""
times = Time(["2015-08-28 03:30", "2015-09-05 10:30"])
# the following line will raise an Exception if the bug recurs.
get_moon(times, ephemeris='builtin')
def test_barycentric_pos_posvel_same():
# Check that the two routines give identical results.
ep1 = get_body_barycentric('earth', Time('2016-03-20T12:30:00'))
ep2, _ = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))
assert np.all(ep1.xyz == ep2.xyz)
def test_earth_barycentric_velocity_rough():
# Check that a time near the equinox gives roughly the right result.
ep, ev = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))
assert_quantity_allclose(ep.xyz, [-1., 0., 0.]*u.AU, atol=0.01*u.AU)
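    # Explanatory note (added): near the March equinox the Earth sits at
    # roughly (-1, 0, 0) AU, so its orbital velocity (~2*pi AU/yr ~ 30 km/s)
    # points along -y in the ecliptic; tilting by the ~23.5 deg obliquity into
    # equatorial coordinates gives the (0, cos(eps), sin(eps)) direction used
    # in the expected value below.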
expected = u.Quantity([0.*u.one,
np.cos(23.5*u.deg),
np.sin(23.5*u.deg)]) * -30. * u.km / u.s
assert_quantity_allclose(ev.xyz, expected, atol=1.*u.km/u.s)
def test_earth_barycentric_velocity_multi_d():
# Might as well test it with a multidimensional array too.
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
ep, ev = get_body_barycentric_posvel('earth', t)
# note: assert_quantity_allclose doesn't like the shape mismatch.
# this is a problem with np.testing.assert_allclose.
assert quantity_allclose(ep.get_xyz(xyz_axis=-1),
[[-1., 0., 0.], [+1., 0., 0.]]*u.AU,
atol=0.06*u.AU)
expected = u.Quantity([0.*u.one,
np.cos(23.5*u.deg),
np.sin(23.5*u.deg)]) * ([[-30.], [30.]] * u.km / u.s)
assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected,
atol=2.*u.km/u.s)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize(('body', 'pos_tol', 'vel_tol'),
(('mercury', 1000.*u.km, 1.*u.km/u.s),
('jupiter', 100000.*u.km, 2.*u.km/u.s),
('earth', 10*u.km, 10*u.mm/u.s),
('moon', 18*u.km, 50*u.mm/u.s)))
def test_barycentric_velocity_consistency(body, pos_tol, vel_tol):
# Tolerances are about 1.5 times the rms listed for plan94 and epv00,
# except for Mercury (which nominally is 334 km rms), and the Moon
# (which nominally is 6 km rms).
t = Time('2016-03-20T12:30:00')
ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
# Might as well test it with a multidimensional array too.
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),
Time('1980-03-25 00:00'),
Time('2010-10-13 00:00')))
def test_url_or_file_ephemeris(time):
# URL for ephemeris de432s used for testing:
url = 'http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp'
# Pass the ephemeris directly as a URL.
coord_by_url = get_body('earth', time, ephemeris=url)
# Translate the URL to the cached location on the filesystem.
    # Since we just used the URL above, it should already have been downloaded.
filepath = download_file(url, cache=True)
# Get the coordinates using the file path directly:
coord_by_filepath = get_body('earth', time, ephemeris=filepath)
# Using the URL or filepath should give exactly the same results:
assert_quantity_allclose(coord_by_url.ra, coord_by_filepath.ra)
assert_quantity_allclose(coord_by_url.dec, coord_by_filepath.dec)
assert_quantity_allclose(coord_by_url.distance, coord_by_filepath.distance)
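# A minimal sketch (not one of the original tests) showing the equivalent
# context-manager style: solar_system_ephemeris.set accepts the same URL or
# file path as the ephemeris keyword used above. Assumes the de432s URL is
# reachable and that remote data access is allowed.
def _example_url_ephemeris_via_context_manager(time):
    url = 'http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp'
    with solar_system_ephemeris.set(url):
        # Inside the block, get_body uses this kernel without repeating the keyword.
        return get_body('earth', time)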
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_url_ephemeris_wrong_input():
time = Time('1960-01-12 00:00')
with pytest.raises((HTTPError, URLError)):
# A non-existent URL
get_body('earth', time, ephemeris=get_pkg_data_filename('path/to/nonexisting/file.bsp'))
with pytest.raises(HTTPError):
# A non-existent version of the JPL ephemeris
get_body('earth', time, ephemeris='de001')
with pytest.raises(ValueError):
# An invalid string
get_body('earth', time, ephemeris='not_an_ephemeris')
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_file_ephemeris_wrong_input():
time = Time('1960-01-12 00:00')
# Try loading a non-existing file:
with pytest.raises(ValueError):
get_body('earth', time, ephemeris='/path/to/nonexisting/file.bsp')
# NOTE: This test currently leaves the file open (ResourceWarning).
# To fix this issue, an upstream fix is required in jplephem
# package.
# Try loading a file that does exist, but is not an ephemeris file:
with pytest.warns(ResourceWarning), pytest.raises(ValueError):
get_body('earth', time, ephemeris=__file__)
def test_regression_10271():
t = Time(58973.534052125986, format='mjd')
# GCRS position of ALMA at this time
obs_p = CartesianRepresentation(5724535.74068625, -1311071.58985697, -2492738.93017009, u.m)
geocentre = CartesianRepresentation(0, 0, 0, u.m)
icrs_sun_from_alma = _get_apparent_body_position('sun', t, 'builtin', obs_p)
icrs_sun_from_geocentre = _get_apparent_body_position('sun', t, 'builtin', geocentre)
difference = (icrs_sun_from_alma - icrs_sun_from_geocentre).norm()
assert_quantity_allclose(difference, 0.13046941*u.m, atol=1*u.mm)
|
1a5b1ca7df7de52490112091322310c6fd4c53aa23d1de81df93c7dcf7f51e6b | # -*- coding: utf-8 -*-
"""
Tests the Angle string formatting capabilities. SkyCoord formatting is in
test_sky_coord
"""
from astropy.coordinates.angles import Angle
from astropy import units as u
def test_to_string_precision():
# There are already some tests in test_api.py, but this is a regression
# test for the bug in issue #1319 which caused incorrect formatting of the
# seconds for precision=0
angle = Angle(-1.23456789, unit=u.degree)
assert angle.to_string(precision=3) == '-1d14m04.444s'
assert angle.to_string(precision=1) == '-1d14m04.4s'
assert angle.to_string(precision=0) == '-1d14m04s'
angle2 = Angle(-1.23456789, unit=u.hourangle)
assert angle2.to_string(precision=3, unit=u.hour) == '-1h14m04.444s'
assert angle2.to_string(precision=1, unit=u.hour) == '-1h14m04.4s'
assert angle2.to_string(precision=0, unit=u.hour) == '-1h14m04s'
# Regression test for #7141
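    # fields limits how many sexagesimal fields are shown, and the last shown
    # field is rounded: -0d30m rounds to -1d when only the degree field is kept.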
angle3 = Angle(-0.5, unit=u.degree)
assert angle3.to_string(precision=0, fields=3) == '-0d30m00s'
assert angle3.to_string(precision=0, fields=2) == '-0d30m'
assert angle3.to_string(precision=0, fields=1) == '-1d'
def test_to_string_decimal():
# There are already some tests in test_api.py, but this is a regression
# test for the bug in issue #1323 which caused decimal formatting to not
# work
angle1 = Angle(2., unit=u.degree)
assert angle1.to_string(decimal=True, precision=3) == '2.000'
assert angle1.to_string(decimal=True, precision=1) == '2.0'
assert angle1.to_string(decimal=True, precision=0) == '2'
angle2 = Angle(3., unit=u.hourangle)
assert angle2.to_string(decimal=True, precision=3) == '3.000'
assert angle2.to_string(decimal=True, precision=1) == '3.0'
assert angle2.to_string(decimal=True, precision=0) == '3'
angle3 = Angle(4., unit=u.radian)
assert angle3.to_string(decimal=True, precision=3) == '4.000'
assert angle3.to_string(decimal=True, precision=1) == '4.0'
assert angle3.to_string(decimal=True, precision=0) == '4'
def test_to_string_formats():
a = Angle(1.113355, unit=u.deg)
latex_str = r'$1^\circ06{}^\prime48.078{}^{\prime\prime}$'
assert a.to_string(format='latex') == latex_str
assert a.to_string(format='latex_inline') == latex_str
assert a.to_string(format='unicode') == '1°06′48.078″'
a = Angle(1.113355, unit=u.hour)
latex_str = r'$1^{\mathrm{h}}06^{\mathrm{m}}48.078^{\mathrm{s}}$'
assert a.to_string(format='latex') == latex_str
assert a.to_string(format='latex_inline') == latex_str
assert a.to_string(format='unicode') == '1ʰ06ᵐ48.078ˢ'
a = Angle(1.113355, unit=u.radian)
assert a.to_string(format='latex') == r'$1.11336\mathrm{rad}$'
assert a.to_string(format='latex_inline') == r'$1.11336\mathrm{rad}$'
assert a.to_string(format='unicode') == '1.11336rad'
def test_to_string_fields():
a = Angle(1.113355, unit=u.deg)
assert a.to_string(fields=1) == r'1d'
assert a.to_string(fields=2) == r'1d07m'
assert a.to_string(fields=3) == r'1d06m48.078s'
def test_to_string_padding():
a = Angle(0.5653, unit=u.deg)
assert a.to_string(unit='deg', sep=':', pad=True) == r'00:33:55.08'
# Test to make sure negative angles are padded correctly
a = Angle(-0.5653, unit=u.deg)
assert a.to_string(unit='deg', sep=':', pad=True) == r'-00:33:55.08'
def test_sexagesimal_rounding_up():
a = Angle(359.999999999999, unit=u.deg)
assert a.to_string(precision=None) == '360d00m00s'
assert a.to_string(precision=4) == '360d00m00.0000s'
assert a.to_string(precision=5) == '360d00m00.00000s'
assert a.to_string(precision=6) == '360d00m00.000000s'
assert a.to_string(precision=7) == '360d00m00.0000000s'
assert a.to_string(precision=8) == '360d00m00.00000000s'
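    # At precision=9 the seconds no longer round up to 60, so nothing carries
    # over and the value stays just below 360d.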
assert a.to_string(precision=9) == '359d59m59.999999996s'
a = Angle(3.999999, unit=u.deg)
assert a.to_string(fields=2, precision=None) == '4d00m'
assert a.to_string(fields=2, precision=1) == '4d00m'
assert a.to_string(fields=2, precision=5) == '4d00m'
assert a.to_string(fields=1, precision=1) == '4d'
assert a.to_string(fields=1, precision=5) == '4d'
def test_to_string_scalar():
a = Angle(1.113355, unit=u.deg)
assert isinstance(a.to_string(), str)
def test_to_string_radian_with_precision():
"""
Regression test for a bug that caused ``to_string`` to crash for angles in
radians when specifying the precision.
"""
# Check that specifying the precision works
a = Angle(3., unit=u.rad)
assert a.to_string(precision=3, sep='fromunit') == '3.000rad'
def test_sexagesimal_round_down():
a1 = Angle(1, u.deg).to(u.hourangle)
a2 = Angle(2, u.deg)
assert a1.to_string() == '0h04m00s'
assert a2.to_string() == '2d00m00s'
def test_to_string_fields_colon():
a = Angle(1.113355, unit=u.deg)
assert a.to_string(fields=2, sep=':') == '1:07'
assert a.to_string(fields=3, sep=':') == '1:06:48.078'
assert a.to_string(fields=1, sep=':') == '1'
|
35fcafc9415a542aa506d74fe875daf8f8d978fcec7581cf51f4b5c4a34e4f92 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization and other aspects of Angle and subclasses"""
import threading
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.coordinates.angles import Longitude, Latitude, Angle
from astropy.coordinates.errors import (
IllegalSecondError, IllegalMinuteError, IllegalHourError,
IllegalSecondWarning, IllegalMinuteWarning)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_create_angles():
"""
Tests creating and accessing Angle objects
"""
''' The "angle" is a fundamental object. The internal
representation is stored in radians, but this is transparent to the user.
    Units *must* be specified rather than a default value being assumed. This is
as much for self-documenting code as anything else.
Angle objects simply represent a single angular coordinate. More specific
angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle.'''
a1 = Angle(54.12412, unit=u.degree)
a2 = Angle("54.12412", unit=u.degree)
a3 = Angle("54:07:26.832", unit=u.degree)
a4 = Angle("54.12412 deg")
a5 = Angle("54.12412 degrees")
a6 = Angle("54.12412°") # because we like Unicode
a8 = Angle("54°07'26.832\"")
a9 = Angle([54, 7, 26.832], unit=u.degree)
assert_allclose(a9.value, [54, 7, 26.832])
assert a9.unit is u.degree
a10 = Angle(3.60827466667, unit=u.hour)
a11 = Angle("3:36:29.7888000120", unit=u.hour)
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hour'):
a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hour'):
# Regression test for #5001
a13 = Angle((3, 36, 29.7888000120), unit='hour')
Angle(0.944644098745, unit=u.radian)
with pytest.raises(u.UnitsError):
Angle(54.12412)
# raises an exception because this is ambiguous
with pytest.raises(u.UnitsError):
Angle(54.12412, unit=u.m)
with pytest.raises(ValueError):
Angle(12.34, unit="not a unit")
a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous
a15 = Angle("5h4m3s") # single digits, no decimal
assert a15.unit == u.hourangle
a16 = Angle("1 d")
a17 = Angle("1 degree")
assert a16.degree == 1
assert a17.degree == 1
a18 = Angle("54 07.4472", unit=u.degree)
a19 = Angle("54:07.4472", unit=u.degree)
a20 = Angle("54d07.4472m", unit=u.degree)
a21 = Angle("3h36m", unit=u.hour)
a22 = Angle("3.6h", unit=u.hour)
a23 = Angle("- 3h", unit=u.hour)
a24 = Angle("+ 3h", unit=u.hour)
# ensure the above angles that should match do
assert a1 == a2 == a3 == a4 == a5 == a6 == a8 == a18 == a19 == a20
assert_allclose(a1.radian, a2.radian)
assert_allclose(a2.degree, a3.degree)
assert_allclose(a3.radian, a4.radian)
assert_allclose(a4.radian, a5.radian)
assert_allclose(a5.radian, a6.radian)
assert_allclose(a10.degree, a11.degree)
assert a11 == a12 == a13 == a14
assert a21 == a22
assert a23 == -a24
# check for illegal ranges / values
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.degree)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.degree)
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.hour)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.hour)
with pytest.raises(IllegalHourError):
a = Angle("99 25 51.0", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12 25 51.0xxx", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12h34321m32.2s")
assert a1 is not None
def test_angle_from_view():
q = np.arange(3.) * u.deg
a = q.view(Angle)
assert type(a) is Angle
assert a.unit is q.unit
assert np.all(a == q)
q2 = np.arange(4) * u.m
with pytest.raises(u.UnitTypeError):
q2.view(Angle)
def test_angle_ops():
"""
Tests operations on Angle objects
"""
# Angles can be added and subtracted. Multiplication and division by a
# scalar is also permitted. A negative operator is also valid. All of
# these operate in a single dimension. Attempting to multiply or divide two
    # Angle objects will return a Quantity. An exception will be raised if one
    # attempts to store output with a non-angular unit in an Angle [#2718].
a1 = Angle(3.60827466667, unit=u.hour)
a2 = Angle("54:07:26.832", unit=u.degree)
a1 + a2 # creates new Angle object
a1 - a2
-a1
assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10
# commutativity
assert (2 * a1).hour == (a1 * 2).hour
a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1
assert_allclose(a1.radian, a3.radian)
assert a1 is not a3
a4 = abs(-a1)
assert a4.radian == a1.radian
a5 = Angle(5.0, unit=u.hour)
assert a5 > a1
assert a5 >= a1
assert a1 < a5
assert a1 <= a5
# check operations with non-angular result give Quantity.
a6 = Angle(45., u.degree)
a7 = a6 * a5
assert type(a7) is u.Quantity
# but those with angular result yield Angle.
# (a9 is regression test for #5327)
a8 = a1 + 1.*u.deg
assert type(a8) is Angle
a9 = 1.*u.deg + a1
assert type(a9) is Angle
with pytest.raises(TypeError):
a6 *= a5
with pytest.raises(TypeError):
a6 *= u.m
with pytest.raises(TypeError):
np.sin(a6, out=a6)
def test_angle_methods():
# Most methods tested as part of the Quantity tests.
# A few tests here which caused problems before: #8368
a = Angle([0., 2.], 'deg')
a_mean = a.mean()
assert type(a_mean) is Angle
assert a_mean == 1. * u.degree
a_std = a.std()
assert type(a_std) is Angle
assert a_std == 1. * u.degree
a_var = a.var()
assert type(a_var) is u.Quantity
assert a_var == 1. * u.degree ** 2
a_ptp = a.ptp()
assert type(a_ptp) is Angle
assert a_ptp == 2. * u.degree
a_max = a.max()
assert type(a_max) is Angle
assert a_max == 2. * u.degree
a_min = a.min()
assert type(a_min) is Angle
assert a_min == 0. * u.degree
def test_angle_convert():
"""
Test unit conversion of Angle objects
"""
angle = Angle("54.12412", unit=u.degree)
assert_allclose(angle.hour, 3.60827466667)
assert_allclose(angle.radian, 0.944644098745)
assert_allclose(angle.degree, 54.12412)
assert len(angle.hms) == 3
assert isinstance(angle.hms, tuple)
assert angle.hms[0] == 3
assert angle.hms[1] == 36
assert_allclose(angle.hms[2], 29.78879999999947)
# also check that the namedtuple attribute-style access works:
assert angle.hms.h == 3
assert angle.hms.m == 36
assert_allclose(angle.hms.s, 29.78879999999947)
assert len(angle.dms) == 3
assert isinstance(angle.dms, tuple)
assert angle.dms[0] == 54
assert angle.dms[1] == 7
assert_allclose(angle.dms[2], 26.831999999992036)
# also check that the namedtuple attribute-style access works:
assert angle.dms.d == 54
assert angle.dms.m == 7
assert_allclose(angle.dms.s, 26.831999999992036)
assert isinstance(angle.dms[0], float)
assert isinstance(angle.hms[0], float)
# now make sure dms and signed_dms work right for negative angles
negangle = Angle("-54.12412", unit=u.degree)
assert negangle.dms.d == -54
assert negangle.dms.m == -7
assert_allclose(negangle.dms.s, -26.831999999992036)
assert negangle.signed_dms.sign == -1
assert negangle.signed_dms.d == 54
assert negangle.signed_dms.m == 7
assert_allclose(negangle.signed_dms.s, 26.831999999992036)
def test_angle_formatting():
"""
Tests string formatting for Angle objects
"""
'''
    The to_string method of Angle has a signature along the lines of:
    def to_string(self, unit=None, decimal=False, sep='fromunit',
                  precision=None, pad=False, ...):
    The "decimal" parameter defaults to False since if you need to print the
    Angle as a decimal, there's no need to use to_string at all.
'''
angle = Angle("54.12412", unit=u.degree)
# __str__ is the default `format`
assert str(angle) == angle.to_string()
res = 'Angle as HMS: 3h36m29.7888s'
assert f"Angle as HMS: {angle.to_string(unit=u.hour)}" == res
res = 'Angle as HMS: 3:36:29.7888'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':')}" == res
res = 'Angle as HMS: 3:36:29.79'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':', precision=2)}" == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = 'Angle as HMS: 3h36m29.7888s'
assert "Angle as HMS: {}".format(angle.to_string(unit=u.hour,
sep=("h", "m", "s"),
precision=4)) == res
res = 'Angle as HMS: 3-36|29.7888'
assert "Angle as HMS: {}".format(angle.to_string(unit=u.hour, sep=["-", "|"],
precision=4)) == res
res = 'Angle as HMS: 3-36-29.7888'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep='-', precision=4)}" == res
res = 'Angle as HMS: 03h36m29.7888s'
assert f"Angle as HMS: {angle.to_string(unit=u.hour, precision=4, pad=True)}" == res
# Same as above, in degrees
angle = Angle("3 36 29.78880", unit=u.degree)
res = 'Angle as DMS: 3d36m29.7888s'
assert f"Angle as DMS: {angle.to_string(unit=u.degree)}" == res
res = 'Angle as DMS: 3:36:29.7888'
assert f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':')}" == res
res = 'Angle as DMS: 3:36:29.79'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep=":",
precision=2)) == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = 'Angle as DMS: 3d36m29.7888s'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree,
sep=("d", "m", "s"),
precision=4)) == res
res = 'Angle as DMS: 3-36|29.7888'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep=["-", "|"],
precision=4)) == res
res = 'Angle as DMS: 3-36-29.7888'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, sep="-",
precision=4)) == res
res = 'Angle as DMS: 03d36m29.7888s'
assert "Angle as DMS: {}".format(angle.to_string(unit=u.degree, precision=4,
pad=True)) == res
res = 'Angle as rad: 0.0629763rad'
assert f"Angle as rad: {angle.to_string(unit=u.radian)}" == res
res = 'Angle as rad decimal: 0.0629763'
assert f"Angle as rad decimal: {angle.to_string(unit=u.radian, decimal=True)}" == res
# check negative angles
angle = Angle(-1.23456789, unit=u.degree)
angle2 = Angle(-1.23456789, unit=u.hour)
assert angle.to_string() == '-1d14m04.444404s'
assert angle.to_string(pad=True) == '-01d14m04.444404s'
assert angle.to_string(unit=u.hour) == '-0h04m56.2962936s'
assert angle2.to_string(unit=u.hour, pad=True) == '-01h14m04.444404s'
assert angle.to_string(unit=u.radian, decimal=True) == '-0.0215473'
def test_to_string_vector():
# Regression test for the fact that vectorize doesn't work with Numpy 1.6
assert Angle([1./7., 1./7.], unit='deg').to_string()[0] == "0d08m34.28571429s"
assert Angle([1./7.], unit='deg').to_string()[0] == "0d08m34.28571429s"
assert Angle(1./7., unit='deg').to_string() == "0d08m34.28571429s"
def test_angle_format_roundtripping():
"""
Ensures that the string representation of an angle can be used to create a
new valid Angle.
"""
a1 = Angle(0, unit=u.radian)
a2 = Angle(10, unit=u.degree)
a3 = Angle(0.543, unit=u.degree)
a4 = Angle('1d2m3.4s')
assert Angle(str(a1)).degree == a1.degree
assert Angle(str(a2)).degree == a2.degree
assert Angle(str(a3)).degree == a3.degree
assert Angle(str(a4)).degree == a4.degree
# also check Longitude/Latitude
ra = Longitude('1h2m3.4s')
dec = Latitude('1d2m3.4s')
assert_allclose(Angle(str(ra)).degree, ra.degree)
assert_allclose(Angle(str(dec)).degree, dec.degree)
def test_radec():
"""
Tests creation/operations of Longitude and Latitude objects
"""
'''
Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude
and Latitude can parse any unambiguous format (tuples, formatted strings, etc.).
The intention is not to create an Angle subclass for every possible
coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude
are so prevalent in astronomy that it's worth creating ones for these
    units. They will be noted as "special" in the docs; the plain Angle class
    should be used for other coordinate systems.
'''
with pytest.raises(u.UnitsError):
ra = Longitude("4:08:15.162342") # error - hours or degrees?
with pytest.raises(u.UnitsError):
ra = Longitude("-4:08:15.162342")
# the "smart" initializer allows >24 to automatically do degrees, but the
# Angle-based one does not
# TODO: adjust in 0.3 for whatever behavior is decided on
# ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24
# assert_allclose(ra.degree, 26.570929342)
with pytest.raises(u.UnitsError):
ra = Longitude("26:34:15.345634")
# ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(12)
with pytest.raises(ValueError):
ra = Longitude("garbage containing a d and no units")
ra = Longitude("12h43m23s")
assert_allclose(ra.hour, 12.7230555556)
# TODO: again, fix based on >24 behavior
# ra = Longitude((56,14,52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((56, 14, 52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((12, 14, 52)) # ambiguous w/o units
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hours'):
ra = Longitude((12, 14, 52), unit=u.hour)
# Units can be specified
ra = Longitude("4:08:15.162342", unit=u.hour)
# TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately
    # Whereas Longitude values are commonly found in hours or degrees, declination
    # is nearly always specified in degrees, so this is the default.
# dec = Latitude("-41:08:15.162342")
with pytest.raises(u.UnitsError):
dec = Latitude("-41:08:15.162342")
dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above
def test_negative_zero_dms():
# Test for DMS parser
a = Angle('-00:00:10', u.deg)
assert_allclose(a.degree, -10. / 3600.)
# Unicode minus
a = Angle('−00:00:10', u.deg)
assert_allclose(a.degree, -10. / 3600.)
def test_negative_zero_dm():
# Test for DM parser
a = Angle('-00:10', u.deg)
assert_allclose(a.degree, -10. / 60.)
def test_negative_zero_hms():
# Test for HMS parser
a = Angle('-00:00:10', u.hour)
assert_allclose(a.hour, -10. / 3600.)
def test_negative_zero_hm():
# Test for HM parser
a = Angle('-00:10', u.hour)
assert_allclose(a.hour, -10. / 60.)
def test_negative_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle('-00:60', u.hour)
assert_allclose(a.hour, -1.)
def test_plus_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle('00:60', u.hour)
assert_allclose(a.hour, 1.)
def test_negative_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('-00:59:60', u.deg)
assert_allclose(a.degree, -1.)
def test_plus_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('+00:59:60', u.deg)
assert_allclose(a.degree, 1.)
def test_negative_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('-00:00:60', u.deg)
assert_allclose(a.degree, -1. / 60.)
def test_plus_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle('+00:00:60', u.deg)
assert_allclose(a.degree, 1. / 60.)
def test_angle_to_is_angle():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
assert isinstance(a, Angle)
assert isinstance(a.to(u.rad), Angle)
def test_angle_to_quantity():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
q = u.Quantity(a)
assert isinstance(q, u.Quantity)
assert q.unit is u.deg
def test_quantity_to_angle():
a = Angle(1.0*u.deg)
assert isinstance(a, Angle)
with pytest.raises(u.UnitsError):
Angle(1.0*u.meter)
a = Angle(1.0*u.hour)
assert isinstance(a, Angle)
assert a.unit is u.hourangle
with pytest.raises(u.UnitsError):
Angle(1.0*u.min)
def test_angle_string():
with pytest.warns(IllegalSecondWarning):
a = Angle('00:00:60', u.deg)
assert str(a) == '0d01m00s'
a = Angle('00:00:59S', u.deg)
assert str(a) == '-0d00m59s'
a = Angle('00:00:59N', u.deg)
assert str(a) == '0d00m59s'
a = Angle('00:00:59E', u.deg)
assert str(a) == '0d00m59s'
a = Angle('00:00:59W', u.deg)
assert str(a) == '-0d00m59s'
a = Angle('-00:00:10', u.hour)
assert str(a) == '-0h00m10s'
a = Angle('00:00:59E', u.hour)
assert str(a) == '0h00m59s'
a = Angle('00:00:59W', u.hour)
assert str(a) == '-0h00m59s'
a = Angle(3.2, u.radian)
assert str(a) == '3.2rad'
a = Angle(4.2, u.microarcsecond)
assert str(a) == '4.2uarcsec'
a = Angle('1.0uarcsec')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecN')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecS')
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecE')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle('1.0uarcsecW')
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("3d")
assert_allclose(a.value, 3.0)
assert a.unit == u.degree
a = Angle("3dN")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dS")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle("3dE")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dW")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle('10"')
assert_allclose(a.value, 10.0)
assert a.unit == u.arcsecond
a = Angle("10'N")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'S")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("10'E")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'W")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle('45°55′12″N')
assert str(a) == '45d55m12s'
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle('45°55′12″S')
assert str(a) == '-45d55m12s'
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
a = Angle('45°55′12″E')
assert str(a) == '45d55m12s'
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle('45°55′12″W')
assert str(a) == '-45d55m12s'
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
with pytest.raises(ValueError):
Angle('00h00m10sN')
with pytest.raises(ValueError):
Angle('45°55′12″NS')
def test_angle_repr():
assert 'Angle' in repr(Angle(0, u.deg))
assert 'Longitude' in repr(Longitude(0, u.deg))
assert 'Latitude' in repr(Latitude(0, u.deg))
a = Angle(0, u.deg)
repr(a)
def test_large_angle_representation():
"""Test that angles above 360 degrees can be output as strings,
in repr, str, and to_string. (regression test for #1413)"""
a = Angle(350, u.deg) + Angle(350, u.deg)
a.to_string()
a.to_string(u.hourangle)
repr(a)
repr(a.to(u.hourangle))
str(a)
str(a.to(u.hourangle))
def test_wrap_at_inplace():
a = Angle([-20, 150, 350, 360] * u.deg)
out = a.wrap_at('180d', inplace=True)
assert out is None
assert np.all(a.degree == np.array([-20., 150., -10., 0.]))
def test_latitude():
with pytest.raises(ValueError):
lat = Latitude(['91d', '89d'])
with pytest.raises(ValueError):
lat = Latitude('-91d')
lat = Latitude(['90d', '89d'])
# check that one can get items
assert lat[0] == 90 * u.deg
assert lat[1] == 89 * u.deg
# and that comparison with angles works
assert np.all(lat == Angle(['90d', '89d']))
# check setitem works
lat[1] = 45. * u.deg
assert np.all(lat == Angle(['90d', '45d']))
# but not with values out of range
with pytest.raises(ValueError):
lat[0] = 90.001 * u.deg
with pytest.raises(ValueError):
lat[0] = -90.001 * u.deg
# these should also not destroy input (#1851)
assert np.all(lat == Angle(['90d', '45d']))
# conserve type on unit change (closes #1423)
angle = lat.to('radian')
assert type(angle) is Latitude
# but not on calculations
angle = lat - 190 * u.deg
assert type(angle) is Angle
assert angle[0] == -100 * u.deg
lat = Latitude('80d')
angle = lat / 2.
assert type(angle) is Angle
assert angle == 40 * u.deg
angle = lat * 2.
assert type(angle) is Angle
assert angle == 160 * u.deg
angle = -lat
assert type(angle) is Angle
assert angle == -80 * u.deg
# Test errors when trying to interoperate with longitudes.
with pytest.raises(TypeError) as excinfo:
lon = Longitude(10, 'deg')
lat = Latitude(lon)
assert "A Latitude angle cannot be created from a Longitude angle" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
lon = Longitude(10, 'deg')
lat = Latitude([20], 'deg')
lat[0] = lon
assert "A Longitude angle cannot be assigned to a Latitude angle" in str(excinfo.value)
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lon = Longitude(10, 'deg')
lat = Latitude(Angle(lon))
assert lat.value == 10.0
# Check setitem.
lon = Longitude(10, 'deg')
lat = Latitude([20], 'deg')
lat[0] = Angle(lon)
assert lat.value[0] == 10.0
def test_longitude():
# Default wrapping at 360d with an array input
lon = Longitude(['370d', '88d'])
assert np.all(lon == Longitude(['10d', '88d']))
assert np.all(lon == Angle(['10d', '88d']))
# conserve type on unit change and keep wrap_angle (closes #1423)
angle = lon.to('hourangle')
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[0]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[1:]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
# but not on calculations
angle = lon / 2.
assert np.all(angle == Angle(['5d', '44d']))
assert type(angle) is Angle
assert not hasattr(angle, 'wrap_angle')
angle = lon * 2. + 400 * u.deg
assert np.all(angle == Angle(['420d', '576d']))
assert type(angle) is Angle
# Test setting a mutable value and having it wrap
lon[1] = -10 * u.deg
assert np.all(lon == Angle(['10d', '350d']))
# Test wrapping and try hitting some edge cases
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
assert np.all(lon.degree == np.array([0., 90, 180, 270, 0]))
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle='180d')
assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))
# Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle)
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
lon.wrap_angle = '180d'
assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))
lon = Longitude('460d')
assert lon == Angle('100d')
lon.wrap_angle = '90d'
assert lon == Angle('-260d')
# check that if we initialize a longitude with another longitude,
# wrap_angle is kept by default
lon2 = Longitude(lon)
assert lon2.wrap_angle == lon.wrap_angle
# but not if we explicitly set it
lon3 = Longitude(lon, wrap_angle='180d')
assert lon3.wrap_angle == 180 * u.deg
# check that wrap_angle is always an Angle
lon = Longitude(lon, wrap_angle=Longitude(180 * u.deg))
assert lon.wrap_angle == 180 * u.deg
assert lon.wrap_angle.__class__ is Angle
# check that wrap_angle is not copied
    wrap_angle = 180 * u.deg
lon = Longitude(lon, wrap_angle=wrap_angle)
assert lon.wrap_angle == 180 * u.deg
assert np.may_share_memory(lon.wrap_angle, wrap_angle)
# check for problem reported in #2037 about Longitude initializing to -0
lon = Longitude(0, u.deg)
lonstr = lon.to_string()
assert not lonstr.startswith('-')
# also make sure dtype is correctly conserved
assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float)
assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int)
# Test errors when trying to interoperate with latitudes.
with pytest.raises(TypeError) as excinfo:
lat = Latitude(10, 'deg')
lon = Longitude(lat)
assert "A Longitude angle cannot be created from a Latitude angle" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
lat = Latitude(10, 'deg')
lon = Longitude([20], 'deg')
lon[0] = lat
assert "A Latitude angle cannot be assigned to a Longitude angle" in str(excinfo.value)
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lat = Latitude(10, 'deg')
lon = Longitude(Angle(lat))
assert lon.value == 10.0
# Check setitem.
lat = Latitude(10, 'deg')
lon = Longitude([20], 'deg')
lon[0] = Angle(lat)
assert lon.value[0] == 10.0
def test_wrap_at():
a = Angle([-20, 150, 350, 360] * u.deg)
assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at('360d').degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at('180d').degree == np.array([-20., 150., -10., 0.]))
assert np.all(a.wrap_at(np.pi * u.rad).degree == np.array([-20., 150., -10., 0.]))
# Test wrapping a scalar Angle
a = Angle('190d')
assert a.wrap_at('180d') == Angle('-170d')
a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg)
for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125):
aw = a.wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
aw = a.to(u.rad).wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
def test_is_within_bounds():
a = Angle([-20, 150, 350] * u.deg)
assert a.is_within_bounds('0d', '360d') is False
assert a.is_within_bounds(None, '360d') is True
assert a.is_within_bounds(-30 * u.deg, None) is True
a = Angle('-20d')
assert a.is_within_bounds('0d', '360d') is False
assert a.is_within_bounds(None, '360d') is True
assert a.is_within_bounds(-30 * u.deg, None) is True
def test_angle_mismatched_unit():
a = Angle('+6h7m8s', unit=u.degree)
assert_allclose(a.value, 91.78333333333332)
def test_regression_formatting_negative():
# Regression test for a bug that caused:
#
# >>> Angle(-1., unit='deg').to_string()
# '-1d00m-0s'
assert Angle(-0., unit='deg').to_string() == '-0d00m00s'
assert Angle(-1., unit='deg').to_string() == '-1d00m00s'
assert Angle(-0., unit='hour').to_string() == '-0h00m00s'
assert Angle(-1., unit='hour').to_string() == '-1h00m00s'
def test_regression_formatting_default_precision():
# Regression test for issue #11140
assert Angle('10:20:30.12345678d').to_string() == '10d20m30.12345678s'
assert Angle('10d20m30.123456784564s').to_string() == '10d20m30.12345678s'
assert Angle('10d20m30.123s').to_string() == '10d20m30.123s'
def test_empty_sep():
a = Angle('05h04m31.93830s')
assert a.to_string(sep='', precision=2, pad=True) == '050431.94'
def test_create_tuple():
"""
Tests creation of an angle with an (h,m,s) tuple
(d, m, s) tuples are not tested because of sign ambiguity issues (#13162)
"""
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hours'):
a1 = Angle((1, 30, 0), unit=u.hourangle)
assert a1.value == 1.5
def test_list_of_quantities():
a1 = Angle([1*u.deg, 1*u.hourangle])
assert a1.unit == u.deg
assert_allclose(a1.value, [1, 15])
a2 = Angle([1*u.hourangle, 1*u.deg], u.deg)
assert a2.unit == u.deg
assert_allclose(a2.value, [15, 1])
def test_multiply_divide():
# Issue #2273
a1 = Angle([1, 2, 3], u.deg)
a2 = Angle([4, 5, 6], u.deg)
a3 = a1 * a2
assert_allclose(a3.value, [4, 10, 18])
assert a3.unit == (u.deg * u.deg)
a3 = a1 / a2
assert_allclose(a3.value, [.25, .4, .5])
assert a3.unit == u.dimensionless_unscaled
def test_mixed_string_and_quantity():
a1 = Angle(['1d', 1. * u.deg])
assert_array_equal(a1.value, [1., 1.])
assert a1.unit == u.deg
a2 = Angle(['1d', 1 * u.rad * np.pi, '3d'])
assert_array_equal(a2.value, [1., 180., 3.])
assert a2.unit == u.deg
def test_array_angle_tostring():
aobj = Angle([1, 2], u.deg)
assert aobj.to_string().dtype.kind == 'U'
assert np.all(aobj.to_string() == ['1d00m00s', '2d00m00s'])
def test_wrap_at_without_new():
"""
Regression test for subtle bugs from situations where an Angle is
    created via numpy channels that don't go through the standard __new__ but
    instead depend on __array_finalize__ to set state. Longitude is used
    because the bug was in its _wrap_angle not getting initialized correctly.
"""
l1 = Longitude([1]*u.deg)
l2 = Longitude([2]*u.deg)
l = np.concatenate([l1, l2])
assert l._wrap_angle is not None
def test__str__():
"""
Check the __str__ method used in printing the Angle
"""
# scalar angle
scangle = Angle('10.2345d')
strscangle = scangle.__str__()
assert strscangle == '10d14m04.2s'
# non-scalar array angles
arrangle = Angle(['10.2345d', '-20d'])
strarrangle = arrangle.__str__()
assert strarrangle == '[10d14m04.2s -20d00m00s]'
# summarizing for large arrays, ... should appear
bigarrangle = Angle(np.ones(10000), u.deg)
assert '...' in bigarrangle.__str__()
def test_repr_latex():
"""
Check the _repr_latex_ method, used primarily by IPython notebooks
"""
# try with both scalar
scangle = Angle(2.1, u.deg)
rlscangle = scangle._repr_latex_()
# and array angles
arrangle = Angle([1, 2.1], u.deg)
rlarrangle = arrangle._repr_latex_()
assert rlscangle == r'$2^\circ06{}^\prime00{}^{\prime\prime}$'
assert rlscangle.split('$')[1] in rlarrangle
# make sure the ... appears for large arrays
bigarrangle = Angle(np.ones(50000)/50000., u.deg)
assert '...' in bigarrangle._repr_latex_()
def test_angle_with_cds_units_enabled():
"""Regression test for #5350
Especially the example in
https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
"""
from astropy.units import cds
# the problem is with the parser, so remove it temporarily
from astropy.coordinates.angle_formats import _AngleParser
del _AngleParser._thread_local._parser
with cds.enable():
Angle('5d')
del _AngleParser._thread_local._parser
Angle('5d')
def test_longitude_nan():
# Check that passing a NaN to Longitude doesn't raise a warning
Longitude([0, np.nan, 1] * u.deg)
def test_latitude_nan():
# Check that passing a NaN to Latitude doesn't raise a warning
Latitude([0, np.nan, 1] * u.deg)
def test_angle_wrap_at_nan():
# Check that no attempt is made to wrap a NaN angle
angle = Angle([0, np.nan, 1] * u.deg)
angle.flags.writeable = False # to force an error if a write is attempted
angle.wrap_at(180*u.deg, inplace=True)
def test_angle_multithreading():
"""
Regression test for issue #7168
"""
angles = ['00:00:00']*10000
def parse_test(i=0):
Angle(angles, unit='hour')
    threads = [threading.Thread(target=parse_test, args=(i,)) for i in range(10)]
    for thread in threads:
        thread.start()
    # Join so that all parsing threads finish before the test returns.
    for thread in threads:
        thread.join()
@pytest.mark.parametrize("cls", [Angle, Longitude, Latitude])
@pytest.mark.parametrize("input, expstr, exprepr",
[(np.nan*u.deg,
"nan",
"nan deg"),
([np.nan, 5, 0]*u.deg,
"[nan 5d00m00s 0d00m00s]",
"[nan, 5., 0.] deg"),
([6, np.nan, 0]*u.deg,
"[6d00m00s nan 0d00m00s]",
"[6., nan, 0.] deg"),
([np.nan, np.nan, np.nan]*u.deg,
"[nan nan nan]",
"[nan, nan, nan] deg"),
(np.nan*u.hour,
"nan",
"nan hourangle"),
([np.nan, 5, 0]*u.hour,
"[nan 5h00m00s 0h00m00s]",
"[nan, 5., 0.] hourangle"),
([6, np.nan, 0]*u.hour,
"[6h00m00s nan 0h00m00s]",
"[6., nan, 0.] hourangle"),
([np.nan, np.nan, np.nan]*u.hour,
"[nan nan nan]",
"[nan, nan, nan] hourangle"),
(np.nan*u.rad,
"nan",
"nan rad"),
([np.nan, 1, 0]*u.rad,
"[nan 1rad 0rad]",
"[nan, 1., 0.] rad"),
([1.50, np.nan, 0]*u.rad,
"[1.5rad nan 0rad]",
"[1.5, nan, 0.] rad"),
([np.nan, np.nan, np.nan]*u.rad,
"[nan nan nan]",
"[nan, nan, nan] rad")])
def test_str_repr_angles_nan(cls, input, expstr, exprepr):
"""
Regression test for issue #11473
"""
q = cls(input)
assert str(q) == expstr
# Deleting whitespaces since repr appears to be adding them for some values
# making the test fail.
    assert repr(q).replace(" ", "") == f'<{cls.__name__}{exprepr}>'.replace(" ", "")
|
fa733bbae35857fa02f1cee59105aba4b9367a15234b3834a96b1d97e181c726 | from contextlib import nullcontext
import astropy.units as u
import numpy as np
from numpy.testing import assert_allclose
import pytest
from astropy import time
from astropy.constants import c
from astropy.table import Table
from astropy.time import Time
from astropy.utils import iers
from astropy.coordinates import (SkyCoord, EarthLocation, ICRS, GCRS, Galactic,
CartesianDifferential,
get_body_barycentric_posvel,
FK5, CartesianRepresentation,
SpectralQuantity)
from astropy.tests.helper import assert_quantity_allclose, quantity_allclose
from astropy.utils.exceptions import AstropyUserWarning, AstropyWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.coordinates.spectral_coordinate import SpectralCoord, _apply_relativistic_doppler_shift
from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES as FITSWCS_VELOCITY_FRAMES
def assert_frame_allclose(frame1, frame2,
pos_rtol=1e-7, pos_atol=1 * u.m,
vel_rtol=1e-7, vel_atol=1 * u.mm / u.s):
# checks that:
# - the positions are equal to within some tolerance (the relative tolerance
# should be dimensionless, the absolute tolerance should be a distance).
# note that these are the tolerances *in 3d*
    # - either both or neither frame has velocities, or if one has no velocities
# the other one can have zero velocities
# - if velocities are present, they are equal to some tolerance
# Ideally this should accept both frames and SkyCoords
if hasattr(frame1, 'frame'): # SkyCoord-like
frame1 = frame1.frame
if hasattr(frame2, 'frame'): # SkyCoord-like
frame2 = frame2.frame
# assert (frame1.data.differentials and frame2.data.differentials or
# (not frame1.data.differentials and not frame2.data.differentials))
assert frame1.is_equivalent_frame(frame2)
frame2_in_1 = frame2.transform_to(frame1)
assert_quantity_allclose(0 * u.m, frame1.separation_3d(frame2_in_1), rtol=pos_rtol, atol=pos_atol)
if frame1.data.differentials:
d1 = frame1.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials['s']
d2 = frame2_in_1.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials['s']
assert_quantity_allclose(d1.norm(d1), d1.norm(d2), rtol=vel_rtol, atol=vel_atol)
def get_greenwich_earthlocation():
"""
    A helper function to get an EarthLocation for Greenwich (without trying to
    do a download)
"""
site_registry = EarthLocation._get_site_registry(force_builtin=True)
return site_registry.get('greenwich')
# GENERAL TESTS
# We first run through a series of cases to test different ways of initializing
# the observer and target for SpectralCoord, including for example frames,
# SkyCoords, and making sure that SpectralCoord is not sensitive to the actual
# frame or representation class.
# Local Standard of Rest
LSRD = Galactic(u=0.1 * u.km, v=0.1 * u.km, w=0.1 * u.km,
U=9 * u.km / u.s, V=12 * u.km / u.s, W=7 * u.km / u.s,
representation_type='cartesian', differential_type='cartesian')
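# Note (added): the dynamical LSR velocity used here has magnitude
# sqrt(9**2 + 12**2 + 7**2) = sqrt(274) ~ 16.55 km/s; the radial-velocity
# checks further down compare against -sqrt(274) km/s for a target lying
# along this direction of motion.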
LSRD_EQUIV = [
LSRD,
SkyCoord(LSRD), # as a SkyCoord
LSRD.transform_to(ICRS()), # different frame
LSRD.transform_to(ICRS()).transform_to(Galactic()) # different representation
]
@pytest.fixture(params=[None] + LSRD_EQUIV)
def observer(request):
return request.param
# Target located in direction of motion of LSRD with no velocities
LSRD_DIR_STATIONARY = Galactic(u=9 * u.km, v=12 * u.km, w=7 * u.km,
representation_type='cartesian')
LSRD_DIR_STATIONARY_EQUIV = [
LSRD_DIR_STATIONARY,
SkyCoord(LSRD_DIR_STATIONARY), # as a SkyCoord
LSRD_DIR_STATIONARY.transform_to(FK5()), # different frame
LSRD_DIR_STATIONARY.transform_to(ICRS()).transform_to(Galactic()) # different representation
]
@pytest.fixture(params=[None] + LSRD_DIR_STATIONARY_EQUIV)
def target(request):
return request.param
def test_create_spectral_coord_observer_target(observer, target):
with nullcontext() if target is None else pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
coord = SpectralCoord([100, 200, 300] * u.nm, observer=observer, target=target)
if observer is None:
assert coord.observer is None
else:
assert_frame_allclose(observer, coord.observer)
if target is None:
assert coord.target is None
else:
assert_frame_allclose(target, coord.target)
assert coord.doppler_rest is None
assert coord.doppler_convention is None
if observer is None or target is None:
assert quantity_allclose(coord.redshift, 0)
assert quantity_allclose(coord.radial_velocity, 0 * u.km/u.s)
elif (any(observer is lsrd for lsrd in LSRD_EQUIV)
and any(target is lsrd for lsrd in LSRD_DIR_STATIONARY_EQUIV)):
assert_quantity_allclose(coord.radial_velocity, -274 ** 0.5 * u.km / u.s, atol=1e-4 * u.km / u.s)
assert_quantity_allclose(coord.redshift, -5.5213158163147646e-05, atol=1e-9)
else:
raise NotImplementedError()
def test_create_from_spectral_coord(observer, target):
"""
Checks that parameters are correctly copied to the new SpectralCoord object
"""
with nullcontext() if target is None else pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spec_coord1 = SpectralCoord([100, 200, 300] * u.nm, observer=observer,
target=target, doppler_convention='optical',
doppler_rest=6000*u.AA)
spec_coord2 = SpectralCoord(spec_coord1)
assert spec_coord1.observer == spec_coord2.observer
assert spec_coord1.target == spec_coord2.target
assert spec_coord1.radial_velocity == spec_coord2.radial_velocity
assert spec_coord1.doppler_convention == spec_coord2.doppler_convention
assert spec_coord1.doppler_rest == spec_coord2.doppler_rest
# INTERNAL FUNCTIONS TESTS
def test_apply_relativistic_doppler_shift():
# Frequency
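    # For a source receding at beta = v/c, frequencies scale by the Doppler
    # factor sqrt((1 - beta) / (1 + beta)); at beta = 0.5 this is sqrt(1/3).
    # Energies and wavenumbers scale the same way, wavelengths by the inverse.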
sq1 = SpectralQuantity(1 * u.GHz)
sq2 = _apply_relativistic_doppler_shift(sq1, 0.5 * c)
assert_quantity_allclose(sq2, np.sqrt(1. / 3.) * u.GHz)
# Wavelength
sq3 = SpectralQuantity(500 * u.nm)
sq4 = _apply_relativistic_doppler_shift(sq3, 0.5 * c)
assert_quantity_allclose(sq4, np.sqrt(3) * 500 * u.nm)
# Energy
sq5 = SpectralQuantity(300 * u.eV)
sq6 = _apply_relativistic_doppler_shift(sq5, 0.5 * c)
assert_quantity_allclose(sq6, np.sqrt(1. / 3.) * 300 * u.eV)
# Wavenumber
sq7 = SpectralQuantity(0.01 / u.micron)
sq8 = _apply_relativistic_doppler_shift(sq7, 0.5 * c)
assert_quantity_allclose(sq8, np.sqrt(1. / 3.) * 0.01 / u.micron)
# Velocity (doppler_convention='relativistic')
sq9 = SpectralQuantity(200 * u.km / u.s, doppler_convention='relativistic', doppler_rest=1 * u.GHz)
sq10 = _apply_relativistic_doppler_shift(sq9, 300 * u.km / u.s)
assert_quantity_allclose(sq10, 499.999666 * u.km / u.s)
assert sq10.doppler_convention == 'relativistic'
    # Velocity (doppler_convention='radio')
sq11 = SpectralQuantity(200 * u.km / u.s, doppler_convention='radio', doppler_rest=1 * u.GHz)
sq12 = _apply_relativistic_doppler_shift(sq11, 300 * u.km / u.s)
assert_quantity_allclose(sq12, 499.650008 * u.km / u.s)
assert sq12.doppler_convention == 'radio'
    # Velocity (doppler_convention='optical')
sq13 = SpectralQuantity(200 * u.km / u.s, doppler_convention='optical', doppler_rest=1 * u.GHz)
sq14 = _apply_relativistic_doppler_shift(sq13, 300 * u.km / u.s)
assert_quantity_allclose(sq14, 500.350493 * u.km / u.s)
assert sq14.doppler_convention == 'optical'
# Velocity - check relativistic velocity addition
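    # Velocities compose via w = (u + v) / (1 + u*v/c**2), so boosting 0.999c
    # by another 0.999c yields 2*0.999 / (1 + 0.999**2) * c, still below c.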
sq13 = SpectralQuantity(0 * u.km / u.s, doppler_convention='relativistic', doppler_rest=1 * u.GHz)
sq14 = _apply_relativistic_doppler_shift(sq13, 0.999 * c)
assert_quantity_allclose(sq14, 0.999 * c)
sq14 = _apply_relativistic_doppler_shift(sq14, 0.999 * c)
assert_quantity_allclose(sq14, (0.999 * 2) / (1 + 0.999**2) * c)
assert sq14.doppler_convention == 'relativistic'
# Cases that should raise errors
sq15 = SpectralQuantity(200 * u.km / u.s)
with pytest.raises(ValueError, match='doppler_convention not set'):
_apply_relativistic_doppler_shift(sq15, 300 * u.km / u.s)
sq16 = SpectralQuantity(200 * u.km / u.s, doppler_rest=10 * u.GHz)
with pytest.raises(ValueError, match='doppler_convention not set'):
_apply_relativistic_doppler_shift(sq16, 300 * u.km / u.s)
sq17 = SpectralQuantity(200 * u.km / u.s, doppler_convention='optical')
with pytest.raises(ValueError, match='doppler_rest not set'):
_apply_relativistic_doppler_shift(sq17, 300 * u.km / u.s)
# BASIC TESTS
def test_init_quantity():
sc = SpectralCoord(10 * u.GHz)
assert sc.value == 10.
assert sc.unit is u.GHz
assert sc.doppler_convention is None
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_spectral_quantity():
sc = SpectralCoord(SpectralQuantity(10 * u.GHz, doppler_convention='optical'))
assert sc.value == 10.
assert sc.unit is u.GHz
assert sc.doppler_convention == 'optical'
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_too_many_args():
with pytest.raises(ValueError, match='Cannot specify radial velocity or redshift if both'):
SpectralCoord(10 * u.GHz, observer=LSRD, target=SkyCoord(10, 20, unit='deg'),
radial_velocity=1 * u.km / u.s)
with pytest.raises(ValueError, match='Cannot specify radial velocity or redshift if both'):
SpectralCoord(10 * u.GHz, observer=LSRD, target=SkyCoord(10, 20, unit='deg'),
redshift=1)
with pytest.raises(ValueError, match='Cannot set both a radial velocity and redshift'):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.km / u.s, redshift=1)
def test_init_wrong_type():
with pytest.raises(TypeError, match='observer must be a SkyCoord or coordinate frame instance'):
SpectralCoord(10 * u.GHz, observer=3.4)
with pytest.raises(TypeError, match='target must be a SkyCoord or coordinate frame instance'):
SpectralCoord(10 * u.GHz, target=3.4)
with pytest.raises(u.UnitsError, match="Argument 'radial_velocity' to function "
"'__new__' must be in units convertible to 'km / s'"):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.kg)
with pytest.raises(TypeError, match="Argument 'radial_velocity' to function "
"'__new__' has no 'unit' attribute. You should "
"pass in an astropy Quantity instead."):
SpectralCoord(10 * u.GHz, radial_velocity='banana')
with pytest.raises(u.UnitsError, match='redshift should be dimensionless'):
SpectralCoord(10 * u.GHz, redshift=1 * u.m)
with pytest.raises(TypeError, match='Cannot parse "banana" as a Quantity. It does not start with a number.'):
SpectralCoord(10 * u.GHz, redshift='banana')
def test_observer_init_rv_behavior():
"""
    Test basic initialization behavior of observer/target and redshift/rv
"""
# Start off by specifying the radial velocity only
sc_init = SpectralCoord([4000, 5000]*u.AA,
radial_velocity=100*u.km/u.s)
assert sc_init.observer is None
assert sc_init.target is None
assert_quantity_allclose(sc_init.radial_velocity, 100*u.km/u.s)
# Next, set the observer, and check that the radial velocity hasn't changed
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc_init.observer = ICRS(CartesianRepresentation([0*u.km, 0*u.km, 0*u.km]))
assert sc_init.observer is not None
assert_quantity_allclose(sc_init.radial_velocity, 100*u.km/u.s)
# Setting the target should now cause the original radial velocity to be
# dropped in favor of the automatically computed one
sc_init.target = SkyCoord(CartesianRepresentation([1*u.km, 0*u.km, 0*u.km]),
frame='icrs', radial_velocity=30 * u.km / u.s)
assert sc_init.target is not None
assert_quantity_allclose(sc_init.radial_velocity, 30 * u.km / u.s)
    # The observer can only be set if originally None; now that it isn't,
    # setting it again should fail
with pytest.raises(ValueError, match='observer has already been set'):
sc_init.observer = GCRS(CartesianRepresentation([0*u.km, 1*u.km, 0*u.km]))
# And similarly, changing the target should not be possible
with pytest.raises(ValueError, match='target has already been set'):
sc_init.target = GCRS(CartesianRepresentation([0*u.km, 1*u.km, 0*u.km]))
def test_rv_redshift_initialization():
# Check that setting the redshift sets the radial velocity appropriately,
# and that the redshift can be recovered
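    # The relativistic relation is 1 + z = sqrt((1 + beta) / (1 - beta)), so
    # z = 1 implies (1 + beta) / (1 - beta) = 4, i.e. beta = 0.6 and v = 0.6 c.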
sc_init = SpectralCoord([4000, 5000]*u.AA, redshift=1)
assert isinstance(sc_init.redshift, u.Quantity)
assert_quantity_allclose(sc_init.redshift, 1*u.dimensionless_unscaled)
assert_quantity_allclose(sc_init.radial_velocity, 0.6 * c)
# Check that setting the same radial velocity produces the same redshift
# and that the radial velocity can be recovered
sc_init2 = SpectralCoord([4000, 5000]*u.AA, radial_velocity=0.6 * c)
assert_quantity_allclose(sc_init2.redshift, 1*u.dimensionless_unscaled)
assert_quantity_allclose(sc_init2.radial_velocity, 0.6 * c)
# Check that specifying redshift as a quantity works
sc_init3 = SpectralCoord([4000, 5000]*u.AA, redshift=1 * u.one)
assert sc_init.redshift == sc_init3.redshift
# Make sure that both redshift and radial velocity can't be specified at
# the same time.
with pytest.raises(ValueError, match='Cannot set both a radial velocity and redshift'):
SpectralCoord([4000, 5000]*u.AA,
radial_velocity=10*u.km/u.s,
redshift=2)
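# A minimal consistency sketch (added here for illustration, not part of the
# original suite) of the relativistic relation exercised above:
# 1 + z = sqrt((1 + beta) / (1 - beta)), i.e.
# beta = ((1 + z)**2 - 1) / ((1 + z)**2 + 1), which gives v = 0.6 c for z = 1.
def test_redshift_velocity_formula_sketch():
    z = 1
    beta = ((1 + z) ** 2 - 1) / ((1 + z) ** 2 + 1)
    sc = SpectralCoord([4000, 5000] * u.AA, redshift=z)
    assert_quantity_allclose(sc.radial_velocity, beta * c)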
def test_replicate():
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc_init = SpectralCoord([4000, 5000]*u.AA, redshift=2)
sc_set_rv = sc_init.replicate(redshift=1)
assert_quantity_allclose(sc_set_rv.radial_velocity, 0.6 * c)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
sc_set_rv = sc_init.replicate(radial_velocity=c / 2)
assert_quantity_allclose(sc_set_rv.redshift, np.sqrt(3) - 1)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
gcrs_origin = GCRS(CartesianRepresentation([0*u.km, 0*u.km, 0*u.km]))
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc_init2 = SpectralCoord([4000, 5000]*u.AA, redshift=1,
observer=gcrs_origin)
with np.errstate(all='ignore'):
sc_init2.replicate(redshift=.5)
assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc_init3 = SpectralCoord([4000, 5000]*u.AA, redshift=1,
target=gcrs_origin)
with np.errstate(all='ignore'):
sc_init3.replicate(redshift=.5)
    assert_quantity_allclose(sc_init3, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc_init4 = SpectralCoord([4000, 5000]*u.AA,
observer=gcrs_origin, target=gcrs_origin)
with pytest.raises(ValueError, match='Cannot specify radial velocity or redshift if both target and observer are specified'):
sc_init4.replicate(redshift=.5)
sc_init = SpectralCoord([4000, 5000]*u.AA, redshift=2)
sc_init_copy = sc_init.replicate(copy=True)
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_copy, [4000, 5000] * u.AA)
sc_init = SpectralCoord([4000, 5000]*u.AA, redshift=2)
sc_init_ref = sc_init.replicate()
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_ref, [6000, 5000] * u.AA)
def test_with_observer_stationary_relative_to():
# Simple tests of with_observer_stationary_relative_to to cover different
# ways of calling it
sc1 = SpectralCoord([4000, 5000]*u.AA)
with pytest.raises(ValueError, match='This method can only be used if both '
'observer and target are defined on the '
'SpectralCoord'):
sc1.with_observer_stationary_relative_to('icrs')
sc2 = SpectralCoord([4000, 5000] * u.AA,
observer=ICRS(0 * u.km, 0 * u.km, 0 * u.km,
-1 * u.km / u.s, 0 * u.km / u.s, -1 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian'),
target=ICRS(0 * u.deg, 45 * u.deg, distance=1 * u.kpc, radial_velocity=2 * u.km / u.s))
# Motion of observer is in opposite direction to target
assert_quantity_allclose(sc2.radial_velocity, (2 + 2 ** 0.5) * u.km / u.s)
# Change to observer that is stationary in ICRS
sc3 = sc2.with_observer_stationary_relative_to('icrs')
# Velocity difference is now pure radial velocity of target
assert_quantity_allclose(sc3.radial_velocity, 2 * u.km / u.s)
# Check setting the velocity in with_observer_stationary_relative_to
sc4 = sc2.with_observer_stationary_relative_to('icrs', velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s)
# Observer once again moving away from target but faster
assert_quantity_allclose(sc4.radial_velocity, 4 * u.km / u.s)
# Check that we can also pass frame classes instead of names
sc5 = sc2.with_observer_stationary_relative_to(ICRS, velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s)
assert_quantity_allclose(sc5.radial_velocity, 4 * u.km / u.s)
# And make sure we can also pass instances of classes without data
sc6 = sc2.with_observer_stationary_relative_to(ICRS(), velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s)
assert_quantity_allclose(sc6.radial_velocity, 4 * u.km / u.s)
# And with data provided no velocities are present
sc7 = sc2.with_observer_stationary_relative_to(ICRS(0 * u.km, 0 * u.km, 0 * u.km,
representation_type='cartesian'),
velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s)
assert_quantity_allclose(sc7.radial_velocity, 4 * u.km / u.s)
# And also have the ability to pass frames with velocities already defined
sc8 = sc2.with_observer_stationary_relative_to(ICRS(0 * u.km, 0 * u.km, 0 * u.km,
2**0.5 * u.km / u.s, 0 * u.km / u.s, 2**0.5 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian'))
assert_quantity_allclose(sc8.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s)
# Make sure that things work properly if passing a SkyCoord
sc9 = sc2.with_observer_stationary_relative_to(SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km,
representation_type='cartesian')),
velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s)
assert_quantity_allclose(sc9.radial_velocity, 4 * u.km / u.s)
sc10 = sc2.with_observer_stationary_relative_to(SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km,
2**0.5 * u.km / u.s, 0 * u.km / u.s, 2**0.5 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian')))
assert_quantity_allclose(sc10.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s)
# But we shouldn't be able to pass both a frame with velocities, and explicit velocities
with pytest.raises(ValueError, match='frame already has differentials, cannot also specify velocity'):
sc2.with_observer_stationary_relative_to(ICRS(0 * u.km, 0 * u.km, 0 * u.km,
2**0.5 * u.km / u.s, 0 * u.km / u.s, 2**0.5 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian'),
velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s)
# And velocities should have three elements
with pytest.raises(ValueError, match='velocity should be a Quantity vector with 3 elements'):
sc2.with_observer_stationary_relative_to(ICRS, velocity=[-2**0.5, 0, -2**0.5, -3] * u.km / u.s)
# Make sure things don't change depending on what frame class is used for reference
sc11 = sc2.with_observer_stationary_relative_to(SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km,
2**0.5 * u.km / u.s, 0 * u.km / u.s, 2**0.5 * u.km / u.s,
representation_type='cartesian',
differential_type='cartesian')).transform_to(Galactic))
assert_quantity_allclose(sc11.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s)
# Check that it is possible to preserve the observer frame
sc12 = sc2.with_observer_stationary_relative_to(LSRD)
sc13 = sc2.with_observer_stationary_relative_to(LSRD, preserve_observer_frame=True)
assert isinstance(sc12.observer, Galactic)
assert isinstance(sc13.observer, ICRS)
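# A hand-computed geometry sketch (added for illustration, not part of the
# original suite) backing the (2 + sqrt(2)) km/s expectation above: the line
# of sight toward ra=0, dec=45 deg is the unit vector (sqrt(2)/2, 0, sqrt(2)/2),
# and the radial velocity is the target's radial motion minus the observer
# velocity projected onto that line of sight.
def test_los_projection_geometry_sketch():
    los = np.array([2 ** 0.5 / 2, 0., 2 ** 0.5 / 2])
    v_obs = np.array([-1., 0., -1.])
    rv = 2. - v_obs @ los
    assert_allclose(rv, 2 + 2 ** 0.5)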
def test_los_shift_radial_velocity():
# Tests to make sure that with_radial_velocity_shift correctly calculates
# the new radial velocity
# First check case where observer and/or target aren't specified
sc1 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s)
sc2 = sc1.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc2.radial_velocity, 2 * u.km / u.s)
sc3 = sc1.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc3.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc4 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s, observer=gcrs_not_origin)
sc5 = sc4.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc5.radial_velocity, 2 * u.km / u.s)
sc6 = sc4.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc6.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc7 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s, target=ICRS(10 * u.deg, 20 * u.deg))
sc8 = sc7.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc8.radial_velocity, 2 * u.km / u.s)
sc9 = sc7.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc9.radial_velocity, -2 * u.km / u.s)
# Check that things still work when both observer and target are specified
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc10 = SpectralCoord(500 * u.nm,
observer=ICRS(0 * u.deg, 0 * u.deg, distance=1 * u.m),
target=ICRS(10 * u.deg, 20 * u.deg,
radial_velocity=1 * u.km / u.s,
distance=10 * u.kpc))
sc11 = sc10.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc11.radial_velocity, 2 * u.km / u.s)
sc12 = sc10.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc12.radial_velocity, -2 * u.km / u.s)
# Check that things work if radial_velocity wasn't specified at all
sc13 = SpectralCoord(500 * u.nm)
sc14 = sc13.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc14.radial_velocity, 1 * u.km / u.s)
sc15 = sc1.with_radial_velocity_shift()
assert_quantity_allclose(sc15.radial_velocity, 1 * u.km / u.s)
# Check that units are verified
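    # (note: 'velocty' in the match below reproduces the spelling used in the
    # error message raised by the library, so the typo is left intact here)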
with pytest.raises(u.UnitsError, match="Argument must have unit physical "
"type 'speed' for radial velocty or "
"'dimensionless' for redshift."):
sc1.with_radial_velocity_shift(target_shift=1 * u.kg)
@pytest.mark.xfail
def test_relativistic_radial_velocity():
# Test for when both observer and target have relativistic velocities.
# This is not yet supported, so the test is xfailed for now.
sc = SpectralCoord(500 * u.nm,
observer=ICRS(0 * u.km, 0 * u.km, 0 * u.km,
-0.5 * c, -0.5 * c, -0.5 * c,
representation_type='cartesian',
differential_type='cartesian'),
target=ICRS(1 * u.kpc, 1 * u.kpc, 1 * u.kpc,
0.5 * c, 0.5 * c, 0.5 * c,
representation_type='cartesian',
differential_type='cartesian'))
assert_quantity_allclose(sc.radial_velocity, 0.989743318610787 * u.km / u.s)
# SCIENCE USE CASE TESTS
def test_spectral_coord_jupiter():
"""
Checks radial velocity between Earth and Jupiter
"""
obstime = time.Time('2018-12-13 9:00')
obs = get_greenwich_earthlocation().get_gcrs(obstime)
pos, vel = get_body_barycentric_posvel('jupiter', obstime)
jupiter = SkyCoord(pos.with_differentials(CartesianDifferential(vel.xyz)), obstime=obstime)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=jupiter)
    # The velocity should be less than ~43 km/s + a bit extra, which is the
# maximum possible earth-jupiter relative velocity. We check the exact
# value here (determined from SpectralCoord, so this serves as a test to
# check that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -7.35219854 * u.km / u.s)
def test_spectral_coord_alphacen():
"""
Checks radial velocity between Earth and Alpha Centauri
"""
obstime = time.Time('2018-12-13 9:00')
obs = get_greenwich_earthlocation().get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# acen = SkyCoord.from_name('alpha cen')
acen = SkyCoord(ra=219.90085*u.deg, dec=-60.83562*u.deg, frame='icrs',
distance=4.37*u.lightyear, radial_velocity=-18.*u.km/u.s)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=acen)
    # The velocity should be less than ~18 + 30 km/s + a bit extra, which is the
# maximum possible relative velocity. We check the exact value here
# (determined from SpectralCoord, so this serves as a test to check that
# this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -26.328301 * u.km / u.s)
def test_spectral_coord_m31():
"""
Checks radial velocity between Earth and M31
"""
obstime = time.Time('2018-12-13 9:00')
obs = get_greenwich_earthlocation().get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# m31 = SkyCoord.from_name('M31')
m31 = SkyCoord(ra=10.6847*u.deg, dec=41.269*u.deg,
distance=710*u.kpc, radial_velocity=-300*u.km/u.s)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=m31)
# The velocity should be less than ~300 + 30 + a bit extra in km/s, which
# is the maximum possible relative velocity. We check the exact values
# here (determined from SpectralCoord, so this serves as a test to check
# that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -279.755128 * u.km / u.s)
assert_allclose(spc.redshift, -0.0009327276702120191)
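# A quick order-of-magnitude sketch (added for illustration, not part of the
# original suite): at ~280 km/s the classical approximation z ~ v/c agrees
# with the relativistic redshift asserted above to better than a part in 10^3.
def test_m31_redshift_approximation_sketch():
    v = -279.755128 * u.km / u.s
    assert_allclose((v / c).to_value(u.dimensionless_unscaled),
                    -0.0009327276702120191, rtol=1e-3)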
def test_shift_to_rest_galaxy():
"""
This tests storing a spectral coordinate with a specific redshift, and then
doing basic rest-to-observed-and-back transformations
"""
z = 5
rest_line_wls = [5007, 6563]*u.AA
observed_spc = SpectralCoord(rest_line_wls*(z+1), redshift=z)
rest_spc = observed_spc.to_rest()
# alternatively:
# rest_spc = observed_spc.with_observer(observed_spec.target)
# although then it would have to be clearly documented, or the `to_rest`
# implemented in Spectrum1D?
assert_quantity_allclose(rest_spc, rest_line_wls)
# No frames are explicitly defined, so to the user, the observer and
# target are not set.
with pytest.raises(AttributeError):
assert_frame_allclose(rest_spc.observer, rest_spc.target)
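# A plain-numpy restatement (sketch added for illustration, not part of the
# original suite) of the relation exercised above:
# lambda_obs = lambda_rest * (1 + z), so dividing the observed wavelengths
# by (1 + z) recovers the rest wavelengths.
def test_observed_rest_wavelength_relation_sketch():
    z = 5
    rest = np.array([5007., 6563.])
    observed = rest * (1 + z)
    assert_allclose(observed / (1 + z), rest)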
def test_shift_to_rest_star_withobserver():
rv = -8.3283011*u.km/u.s
rest_line_wls = [5007, 6563]*u.AA
obstime = time.Time('2018-12-13 9:00')
eloc = get_greenwich_earthlocation()
obs = eloc.get_gcrs(obstime)
acen = SkyCoord(ra=219.90085*u.deg, dec=-60.83562*u.deg, frame='icrs',
distance=4.37*u.lightyear)
# Note that above the rv is missing from the SkyCoord.
# That's intended, as it will instead be set in the `SpectralCoord`. But
    # the SpectralCoord machinery should yield something comparable to
    # test_spectral_coord_alphacen
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
observed_spc = SpectralCoord(rest_line_wls*(rv/c + 1),
observer=obs, target=acen)
rest_spc = observed_spc.to_rest()
assert_quantity_allclose(rest_spc, rest_line_wls)
barycentric_spc = observed_spc.with_observer_stationary_relative_to('icrs')
baryrest_spc = barycentric_spc.to_rest()
assert quantity_allclose(baryrest_spc, rest_line_wls)
    # now make sure the change introduced by the barycentric shift is
    # comparable to the offset radial_velocity_correction produces
# barytarg = SkyCoord(barycentric_spc.target.frame) # should be this but that doesn't work for unclear reasons
barytarg = SkyCoord(barycentric_spc.target.data.without_differentials(),
frame=barycentric_spc.target.realize_frame(None))
vcorr = barytarg.radial_velocity_correction(kind='barycentric',
obstime=obstime, location=eloc)
drv = baryrest_spc.radial_velocity - observed_spc.radial_velocity
# note this probably will not work on the first try, but it's ok if this is
# "good enough", where good enough is estimated below. But that could be
# adjusted if we think that's too aggressive of a precision target for what
# the machinery can handle
# with pytest.raises(AssertionError):
assert_quantity_allclose(vcorr, drv, atol=10*u.m/u.s)
gcrs_origin = GCRS(CartesianRepresentation([0*u.km, 0*u.km, 0*u.km]))
gcrs_not_origin = GCRS(CartesianRepresentation([1*u.km, 0*u.km, 0*u.km]))
@pytest.mark.parametrize("sc_kwargs", [
dict(radial_velocity=0*u.km/u.s),
dict(observer=gcrs_origin, radial_velocity=0*u.km/u.s),
dict(target=gcrs_origin, radial_velocity=0*u.km/u.s),
dict(observer=gcrs_origin, target=gcrs_not_origin)])
def test_los_shift(sc_kwargs):
wl = [4000, 5000]*u.AA
with nullcontext() if 'observer' not in sc_kwargs and 'target' not in sc_kwargs else pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc_init = SpectralCoord(wl, **sc_kwargs)
# these should always work in *all* cases because it's unambiguous that
# a target shift should behave this way
new_sc1 = sc_init.with_radial_velocity_shift(.1)
assert_quantity_allclose(new_sc1, wl*1.1)
    new_sc2 = sc_init.with_radial_velocity_shift(.1*u.dimensionless_unscaled)  # interpreted as redshift
assert_quantity_allclose(new_sc1, new_sc2)
new_sc3 = sc_init.with_radial_velocity_shift(-100*u.km/u.s)
assert_quantity_allclose(new_sc3, wl*(1 + (-100*u.km/u.s / c)))
# now try the cases where observer is specified as well/instead
if sc_init.observer is None or sc_init.target is None:
with pytest.raises(ValueError):
# both must be specified if you're going to mess with observer
sc_init.with_radial_velocity_shift(observer_shift=.1)
if sc_init.observer is not None and sc_init.target is not None:
        # redshifting the observer should *blueshift* the LOS velocity since
        # it's the observer-to-target vector that matters
new_sc4 = sc_init.with_radial_velocity_shift(observer_shift=.1)
assert_quantity_allclose(new_sc4, wl/1.1)
# an equal shift in both should produce no offset at all
new_sc5 = sc_init.with_radial_velocity_shift(target_shift=.1, observer_shift=.1)
assert_quantity_allclose(new_sc5, wl)
def test_asteroid_velocity_frame_shifts():
"""
This test mocks up the use case of observing a spectrum of an asteroid
at different times and from different observer locations.
"""
time1 = time.Time('2018-12-13 9:00')
dt = 12*u.hour
time2 = time1 + dt
    # make the silly but simplifying assumption that the asteroid is moving
    # along the x-axis of GCRS and makes a 10 earth-radius closest approach
v_ast = [5, 0, 0]*u.km/u.s
x1 = -v_ast[0]*dt / 2
x2 = v_ast[0]*dt / 2
z = 10*u.Rearth
cdiff = CartesianDifferential(v_ast)
asteroid_loc1 = GCRS(CartesianRepresentation(x1.to(u.km),
0*u.km,
z.to(u.km),
differentials=cdiff),
obstime=time1)
asteroid_loc2 = GCRS(CartesianRepresentation(x2.to(u.km),
0*u.km,
z.to(u.km),
differentials=cdiff),
obstime=time2)
# assume satellites that are essentially fixed in geostationary orbit on
# opposite sides of the earth
observer1 = GCRS(CartesianRepresentation([0*u.km, 35000*u.km, 0*u.km]),
obstime=time1)
observer2 = GCRS(CartesianRepresentation([0*u.km, -35000*u.km, 0*u.km]),
obstime=time2)
wls = np.linspace(4000, 7000, 100) * u.AA
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spec_coord1 = SpectralCoord(wls, observer=observer1, target=asteroid_loc1)
assert spec_coord1.radial_velocity < 0*u.km/u.s
assert spec_coord1.radial_velocity > -5*u.km/u.s
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spec_coord2 = SpectralCoord(wls, observer=observer2, target=asteroid_loc2)
assert spec_coord2.radial_velocity > 0*u.km/u.s
assert spec_coord2.radial_velocity < 5*u.km/u.s
# now check the behavior of with_observer_stationary_relative_to: we shift each coord
# into the velocity frame of its *own* target. That would then be a
    # SpectralCoord that would allow direct physical comparison of the two
    # different spec_coords. There's no way to test that without
    # actual data, though.
# spec_coord2 is redshifted, so we test that it behaves the way "shifting
# to rest frame" should - the as-observed spectral coordinate should become
# the rest frame, so something that starts out red should become bluer
target_sc2 = spec_coord2.with_observer_stationary_relative_to(spec_coord2.target)
assert np.all(target_sc2 < spec_coord2)
# rv/redshift should be 0 since the observer and target velocities should
# be the same
assert_quantity_allclose(target_sc2.radial_velocity, 0*u.km/u.s,
atol=1e-7 * u.km / u.s)
# check that the same holds for spec_coord1, but be more specific: it
# should follow the standard redshift formula (which in this case yields
# a blueshift, although the formula is the same as 1+z)
target_sc1 = spec_coord1.with_observer_stationary_relative_to(spec_coord1.target)
assert_quantity_allclose(target_sc1, spec_coord1/(1+spec_coord1.redshift))
# TODO: Figure out what is meant by the below use case
# ensure the "target-rest" use gives the same answer
# target_sc1_alt = spec_coord1.with_observer_stationary_relative_to('target-rest')
# assert_quantity_allclose(target_sc1, target_sc1_alt)
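# A plain-vector cross-check (sketch added for illustration, not part of the
# original suite; gravity is ignored and the Earth radius approximated) of the
# first-epoch assertions above: the asteroid approaches observer1, so the
# line-of-sight velocity is negative and bounded in magnitude by its 5 km/s speed.
def test_asteroid_los_geometry_sketch():
    r_ast = np.array([-5. * 6 * 3600, 0., 10 * 6378.1])  # km; x1 = -v_ast*dt/2
    r_obs = np.array([0., 35000., 0.])                   # km
    los = (r_ast - r_obs) / np.linalg.norm(r_ast - r_obs)
    rv = np.array([5., 0., 0.]) @ los                    # km/s; v_ast . los
    assert -5 < rv < 0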
def test_spectral_coord_from_sky_coord_without_distance():
# see https://github.com/astropy/specutils/issues/658 for issue context
obs = SkyCoord(0 * u.m, 0 * u.m, 0 * u.m, representation_type='cartesian')
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
coord = SpectralCoord([1, 2, 3] * u.micron, observer=obs)
# coord.target = SkyCoord.from_name('m31') # <- original issue, but below is the same but requires no remote data access
with pytest.warns(AstropyUserWarning, match='Distance on coordinate object is dimensionless'):
coord.target = SkyCoord(ra=10.68470833*u.deg, dec=41.26875*u.deg)
EXPECTED_VELOCITY_FRAMES = {'geocent': 'gcrs',
'heliocent': 'hcrs',
'lsrk': 'lsrk',
'lsrd': 'lsrd',
'galactoc': FITSWCS_VELOCITY_FRAMES['GALACTOC'],
'localgrp': FITSWCS_VELOCITY_FRAMES['LOCALGRP']}
@pytest.mark.parametrize('specsys', list(EXPECTED_VELOCITY_FRAMES))
@pytest.mark.slow
def test_spectralcoord_accuracy(specsys):
# This is a test to check the numerical results of transformations between
# different velocity frames in SpectralCoord. This compares the velocity
# shifts determined with SpectralCoord to those determined from the rv
# package in Starlink.
velocity_frame = EXPECTED_VELOCITY_FRAMES[specsys]
reference_filename = get_pkg_data_filename('accuracy/data/rv.ecsv')
reference_table = Table.read(reference_filename, format='ascii.ecsv')
rest = 550 * u.nm
with iers.conf.set_temp('auto_download', False):
for row in reference_table:
observer = EarthLocation.from_geodetic(-row['obslon'], row['obslat']).get_itrs(obstime=row['obstime'])
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
sc_topo = SpectralCoord(545 * u.nm, observer=observer, target=row['target'])
# FIXME: A warning is emitted for dates after MJD=57754.0 even
# though the leap second table should be valid until the end of
# 2020.
with nullcontext() if row['obstime'].mjd < 57754 else pytest.warns(AstropyWarning, match='Tried to get polar motions'):
sc_final = sc_topo.with_observer_stationary_relative_to(velocity_frame)
delta_vel = (sc_topo.to(u.km / u.s, doppler_convention='relativistic', doppler_rest=rest) -
sc_final.to(u.km / u.s, doppler_convention='relativistic', doppler_rest=rest))
if specsys == 'galactoc':
assert_allclose(delta_vel.to_value(u.km / u.s), row[specsys.lower()], atol=30)
else:
assert_allclose(delta_vel.to_value(u.km / u.s), row[specsys.lower()], atol=0.02, rtol=0.002)
# TODO: add test when target is not ICRS
# TODO: add test when SpectralCoord is in velocity to start with
|
d72e6fa05a9ee65b614bde6a6feec14dfab04f701659674a0d05938715d3a640 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from contextlib import ExitStack
import pytest
import numpy as np
from numpy import testing as npt
from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.utils.compat import NUMPY_LT_1_19
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.coordinates import (Angle, ICRS, FK4, FK5, Galactic, SkyCoord,
CartesianRepresentation)
from astropy.coordinates.angle_formats import dms_to_degrees, hms_to_hours
def test_angle_arrays():
"""
Test arrays values with Angle objects.
"""
# Tests incomplete
a1 = Angle([0, 45, 90, 180, 270, 360, 720.], unit=u.degree)
npt.assert_almost_equal([0., 45., 90., 180., 270., 360., 720.], a1.value)
a2 = Angle(np.array([-90, -45, 0, 45, 90, 180, 270, 360]), unit=u.degree)
npt.assert_almost_equal([-90, -45, 0, 45, 90, 180, 270, 360],
a2.value)
a3 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"])
npt.assert_almost_equal([12., 45., 5., 229.18311805],
a3.value)
assert a3.unit == u.degree
a4 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"], u.radian)
npt.assert_almost_equal(a4.degree, a3.value)
assert a4.unit == u.radian
a5 = Angle([0, 45, 90, 180, 270, 360], unit=u.degree)
a6 = a5.sum()
npt.assert_almost_equal(a6.value, 945.0)
assert a6.unit is u.degree
with ExitStack() as stack:
stack.enter_context(pytest.raises(TypeError))
# Arrays where the elements are Angle objects are not supported -- it's
# really tricky to do correctly, if at all, due to the possibility of
# nesting.
if not NUMPY_LT_1_19:
stack.enter_context(
pytest.warns(DeprecationWarning,
match='automatic object dtype is deprecated'))
a7 = Angle([a1, a2, a3], unit=u.degree)
a8 = Angle(["04:02:02", "03:02:01", "06:02:01"], unit=u.degree)
npt.assert_almost_equal(a8.value, [4.03388889, 3.03361111, 6.03361111])
a9 = Angle(np.array(["04:02:02", "03:02:01", "06:02:01"]), unit=u.degree)
npt.assert_almost_equal(a9.value, a8.value)
with pytest.raises(u.UnitsError):
a10 = Angle(["04:02:02", "03:02:01", "06:02:01"])
def test_dms():
a1 = Angle([0, 45.5, -45.5], unit=u.degree)
d, m, s = a1.dms
npt.assert_almost_equal(d, [0, 45, -45])
npt.assert_almost_equal(m, [0, 30, -30])
npt.assert_almost_equal(s, [0, 0, -0])
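# A small sketch (added for illustration, not part of the original suite) of
# the sign convention checked above: every component of .dms carries the sign,
# so the original value can be rebuilt without special-casing negatives.
def test_dms_sign_convention_sketch():
    a = Angle(-45.5, unit=u.degree)
    d, m, s = a.dms
    npt.assert_almost_equal(d + m / 60. + s / 3600., -45.5)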
def test_hms():
a1 = Angle([0, 11.5, -11.5], unit=u.hour)
h, m, s = a1.hms
npt.assert_almost_equal(h, [0, 11, -11])
npt.assert_almost_equal(m, [0, 30, -30])
npt.assert_almost_equal(s, [0, 0, -0])
hms = a1.hms
hours = hms[0] + hms[1] / 60. + hms[2] / 3600.
npt.assert_almost_equal(a1.hour, hours)
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hours'):
a2 = Angle(hms, unit=u.hour)
npt.assert_almost_equal(a2.radian, a1.radian)
def test_array_coordinates_creation():
"""
Test creating coordinates from arrays.
"""
c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4])*u.deg)
assert not c.ra.isscalar
with pytest.raises(ValueError):
c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4, 5])*u.deg)
with pytest.raises(ValueError):
c = ICRS(np.array([1, 2, 4, 5])*u.deg, np.array([[3, 4], [5, 6]])*u.deg)
# make sure cartesian initialization also works
cart = CartesianRepresentation(x=[1., 2.]*u.kpc, y=[3., 4.]*u.kpc, z=[5., 6.]*u.kpc)
c = ICRS(cart)
# also ensure strings can be arrays
c = SkyCoord(['1d0m0s', '2h02m00.3s'], ['3d', '4d'])
# but invalid strings cannot
with pytest.raises(ValueError):
c = SkyCoord(Angle(['10m0s', '2h02m00.3s']), Angle(['3d', '4d']))
with pytest.raises(ValueError):
c = SkyCoord(Angle(['1d0m0s', '2h02m00.3s']), Angle(['3x', '4d']))
def test_array_coordinates_distances():
"""
Test creating coordinates from arrays and distances.
"""
# correct way
ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2] * u.kpc)
with pytest.raises(ValueError):
# scalar distance and mismatched array coordinates
ICRS(ra=np.array([1, 2, 3])*u.deg, dec=np.array([[3, 4], [5, 6]])*u.deg, distance=2. * u.kpc)
with pytest.raises(ValueError):
# more distance values than coordinates
ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2, 3.] * u.kpc)
@pytest.mark.parametrize(('arrshape', 'distance'), [((2, ), None), ((4, 2, 5), None), ((4, 2, 5), 2 * u.kpc)])
def test_array_coordinates_transformations(arrshape, distance):
"""
Test transformation on coordinates with array content (first length-2 1D, then a 3D array)
"""
# M31 coordinates from test_transformations
raarr = np.ones(arrshape) * 10.6847929
decarr = np.ones(arrshape) * 41.2690650
if distance is not None:
distance = np.ones(arrshape) * distance
print(raarr, decarr, distance)
c = ICRS(ra=raarr*u.deg, dec=decarr*u.deg, distance=distance)
g = c.transform_to(Galactic())
assert g.l.shape == arrshape
npt.assert_array_almost_equal(g.l.degree, 121.17440967)
npt.assert_array_almost_equal(g.b.degree, -21.57299631)
if distance is not None:
assert g.distance.unit == c.distance.unit
# now make sure round-tripping works through FK5
c2 = c.transform_to(FK5()).transform_to(ICRS())
npt.assert_array_almost_equal(c.ra.radian, c2.ra.radian)
npt.assert_array_almost_equal(c.dec.radian, c2.dec.radian)
assert c2.ra.shape == arrshape
if distance is not None:
assert c2.distance.unit == c.distance.unit
# also make sure it's possible to get to FK4, which uses a direct transform function.
fk4 = c.transform_to(FK4())
npt.assert_array_almost_equal(fk4.ra.degree, 10.0004, decimal=4)
npt.assert_array_almost_equal(fk4.dec.degree, 40.9953, decimal=4)
assert fk4.ra.shape == arrshape
if distance is not None:
assert fk4.distance.unit == c.distance.unit
# now check the reverse transforms run
cfk4 = fk4.transform_to(ICRS())
assert cfk4.ra.shape == arrshape
def test_array_precession():
"""
Ensures that FK5 coordinates as arrays precess their equinoxes
"""
j2000 = Time('J2000')
j1975 = Time('J1975')
fk5 = FK5([1, 1.1]*u.radian, [0.5, 0.6]*u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK5(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
npt.assert_array_less(0.05, np.abs(fk5.ra.degree - fk5_2.ra.degree))
npt.assert_array_less(0.05, np.abs(fk5.dec.degree - fk5_2.dec.degree))
def test_array_separation():
c1 = ICRS([0, 0]*u.deg, [0, 0]*u.deg)
c2 = ICRS([1, 2]*u.deg, [0, 0]*u.deg)
npt.assert_array_almost_equal(c1.separation(c2).degree, [1, 2])
c3 = ICRS([0, 3.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] * u.kpc)
c4 = ICRS([1, 1.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] * u.kpc)
# the 3-1 separation should be twice the 0-1 separation, but not *exactly* the same
sep = c3.separation_3d(c4)
sepdiff = sep[1] - (2 * sep[0])
assert abs(sepdiff.value) < 1e-5
assert sepdiff != 0
def test_array_indexing():
ra = np.linspace(0, 360, 10)
dec = np.linspace(-90, 90, 10)
j1975 = Time(1975, format='jyear')
c1 = FK5(ra*u.deg, dec*u.deg, equinox=j1975)
c2 = c1[4]
assert c2.ra.degree == 160
assert c2.dec.degree == -10
c3 = c1[2:5]
assert_allclose(c3.ra, [80, 120, 160] * u.deg)
assert_allclose(c3.dec, [-50, -30, -10] * u.deg)
c4 = c1[np.array([2, 5, 8])]
assert_allclose(c4.ra, [80, 200, 320] * u.deg)
assert_allclose(c4.dec, [-50, 10, 70] * u.deg)
# now make sure the equinox is preserved
assert c2.equinox == c1.equinox
assert c3.equinox == c1.equinox
assert c4.equinox == c1.equinox
def test_array_len():
input_length = [1, 5]
for length in input_length:
ra = np.linspace(0, 360, length)
dec = np.linspace(0, 90, length)
c = ICRS(ra*u.deg, dec*u.deg)
assert len(c) == length
assert c.shape == (length,)
with pytest.raises(TypeError):
c = ICRS(0*u.deg, 0*u.deg)
len(c)
assert c.shape == tuple()
def test_array_eq():
c1 = ICRS([1, 2]*u.deg, [3, 4]*u.deg)
c2 = ICRS([1, 2]*u.deg, [3, 5]*u.deg)
c3 = ICRS([1, 3]*u.deg, [3, 4]*u.deg)
c4 = ICRS([1, 2]*u.deg, [3, 4.2]*u.deg)
assert np.all(c1 == c1)
assert np.any(c1 != c2)
assert np.any(c1 != c3)
assert np.any(c1 != c4)
|
6bbf372e89e8fe557b3ee8b53064bbcec21dfb1b77748e887c88749d8b9a1cb5 |
import pytest
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
from astropy import units as u
from astropy.coordinates import Longitude, Latitude, EarthLocation
from astropy.coordinates.sites import get_builtin_sites, get_downloaded_sites, SiteRegistry
def test_builtin_sites():
reg = get_builtin_sites()
greenwich = reg['greenwich']
lon, lat, el = greenwich.to_geodetic()
assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
atol=10*u.arcsec)
assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
atol=1*u.arcsec)
assert_quantity_allclose(el, 46*u.m, atol=1*u.m)
names = reg.names
assert 'greenwich' in names
assert 'example_site' in names
with pytest.raises(KeyError) as exc:
reg['nonexistent site']
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."
@pytest.mark.remote_data(source='astropy')
def test_online_sites():
reg = get_downloaded_sites()
keck = reg['keck']
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(lon, -Longitude('155:28.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(el, 4160*u.m, atol=1*u.m)
names = reg.names
assert 'keck' in names
assert 'ctio' in names
# The JSON file contains `name` and `aliases` for each site, and astropy
# should use names from both, but not empty strings [#12721].
assert '' not in names
assert 'Royal Observatory Greenwich' in names
with pytest.raises(KeyError) as exc:
reg['nonexistent site']
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."
with pytest.raises(KeyError) as exc:
reg['kec']
assert exc.value.args[0] == "Site 'kec' not in database. Use the 'names' attribute to see available sites. Did you mean one of: 'keck'?'"
@pytest.mark.remote_data(source='astropy')
# this will *try* the online registry first, so we have to mark it
# remote_data, even though it could fall back on the non-remote version
def test_EarthLocation_basic():
greenwichel = EarthLocation.of_site('greenwich')
lon, lat, el = greenwichel.to_geodetic()
assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
atol=10*u.arcsec)
assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
atol=1*u.arcsec)
assert_quantity_allclose(el, 46*u.m, atol=1*u.m)
names = EarthLocation.get_site_names()
assert 'greenwich' in names
assert 'example_site' in names
with pytest.raises(KeyError) as exc:
EarthLocation.of_site('nonexistent site')
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use EarthLocation.get_site_names to see available sites."
def test_EarthLocation_state_offline():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_builtin=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_builtin=True)
assert oldreg is not newreg
@pytest.mark.remote_data(source='astropy')
def test_EarthLocation_state_online():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_download=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_download=True)
assert oldreg is not newreg
def test_registry():
reg = SiteRegistry()
assert len(reg.names) == 0
names = ['sitea', 'site A']
loc = EarthLocation.from_geodetic(lat=1*u.deg, lon=2*u.deg, height=3*u.km)
reg.add_site(names, loc)
assert len(reg.names) == 2
loc1 = reg['SIteA']
assert loc1 is loc
loc2 = reg['sIte a']
assert loc2 is loc
def test_non_EarthLocation():
"""
A regression test for a typo bug pointed out at the bottom of
https://github.com/astropy/astropy/pull/4042
"""
class EarthLocation2(EarthLocation):
pass
    # This keeps us from needing to use remote_data
# note that this does *not* mess up the registry for EarthLocation because
# registry is cached on a per-class basis
EarthLocation2._get_site_registry(force_builtin=True)
el2 = EarthLocation2.of_site('greenwich')
assert type(el2) is EarthLocation2
assert el2.info.name == 'Royal Observatory Greenwich'
def check_builtin_matches_remote(download_url=True):
"""
This function checks that the builtin sites registry is consistent with the
remote registry (or a registry at some other location).
    Note that currently this is *not* run by the testing suite (because it
    doesn't start with "test"); it is instead meant to be used as a check
    before merging changes in astropy-data.
"""
builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
dl_registry = EarthLocation._get_site_registry(force_download=download_url)
in_dl = {}
matches = {}
for name in builtin_registry.names:
in_dl[name] = name in dl_registry
if in_dl[name]:
matches[name] = quantity_allclose(builtin_registry[name].geocentric, dl_registry[name].geocentric)
else:
matches[name] = False
if not all(matches.values()):
# this makes sure we actually see which don't match
print("In builtin registry but not in download:")
for name in in_dl:
if not in_dl[name]:
print(' ', name)
print("In both but not the same value:")
for name in matches:
if not matches[name] and in_dl[name]:
print(' ', name, 'builtin:', builtin_registry[name], 'download:', dl_registry[name])
assert False, "Builtin and download registry aren't consistent - failures printed to stdout"
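# Example invocation (illustrative only; this helper is run manually and is
# not collected by pytest):
#
#     check_builtin_matches_remote()              # compare against the default remote registry
#     check_builtin_matches_remote('http://...')  # or against an alternate (hypothetical) URL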
def test_meta_present():
reg = get_builtin_sites()
greenwich = reg['greenwich']
assert greenwich.info.meta['source'] == ('Ordnance Survey via '
'http://gpsinformation.net/main/greenwich.htm and UNESCO')
|
d72ced925d2271ce432fc95383de6694824c981bc91e773b4c05d8a43580a247 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import errno
import http.client
import mmap
import operator
import io
import os
import sys
import tempfile
import warnings
import zipfile
import re
from functools import reduce
import numpy as np
from .util import (isreadable, iswritable, isfile, fileobj_name,
fileobj_closed, fileobj_mode, _array_from_file,
_array_to_file, _write_string)
from astropy.utils.data import download_file, _is_url
from astropy.utils.decorators import classproperty
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
if HAS_BZ2:
import bz2
# Maps astropy.io.fits-specific file mode names to the appropriate file
# modes to use for the underlying raw files.
IO_FITS_MODES = {
'readonly': 'rb',
'copyonwrite': 'rb',
'update': 'rb+',
'append': 'ab+',
'ostream': 'wb',
'denywrite': 'rb'}
# Maps OS-level file modes to the appropriate astropy.io.fits specific mode
# to use when given file objects but no mode specified; obviously in
# IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite'
# both require the file to be opened in 'rb' mode. But 'readonly' is the
# default behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
'rb': 'readonly', 'rb+': 'update',
'wb': 'ostream', 'wb+': 'update',
'ab': 'ostream', 'ab+': 'append'}
# A match indicates the file was opened in text mode, which is not allowed
TEXT_RE = re.compile(r'^[rwa]((t?\+?)|(\+?t?))$')
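# For illustration: TEXT_RE matches 'r', 'w+', 'at', 'r+t', etc. -- any open
# mode lacking the binary 'b' flag.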
# readonly actually uses copyonwrite for mmap so that readonly without mmap and
# with mmap still have the same behavior with regard to updating the array. To
# get a truly readonly mmap use denywrite.
# The name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {'readonly': mmap.ACCESS_COPY,
'copyonwrite': mmap.ACCESS_COPY,
'update': mmap.ACCESS_WRITE,
'append': mmap.ACCESS_COPY,
'denywrite': mmap.ACCESS_READ}
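# Illustrative note (not from the original module): ACCESS_COPY lets callers
# modify the returned arrays without the changes ever reaching the file,
# while ACCESS_READ raises TypeError on any write attempt -- which is why
# 'denywrite' yields truly read-only arrays.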
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
GZIP_MAGIC = b'\x1f\x8b\x08'
PKZIP_MAGIC = b'\x50\x4b\x03\x04'
BZIP2_MAGIC = b'\x42\x5a'
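# A minimal sketch (hypothetical helper added for illustration; nothing in
# this module calls it) of how the magic prefixes above identify the
# compression format from a file's first bytes.
def _sniff_compression_sketch(magic):
    """Return 'gzip', 'zip', 'bzip2' or None for the given leading bytes."""
    if magic.startswith(GZIP_MAGIC):
        return 'gzip'
    if magic.startswith(PKZIP_MAGIC):
        return 'zip'
    if magic.startswith(BZIP2_MAGIC):
        return 'bzip2'
    return None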
def _is_bz2file(fileobj):
if HAS_BZ2:
return isinstance(fileobj, bz2.BZ2File)
else:
return False
def _normalize_fits_mode(mode):
if mode is not None and mode not in IO_FITS_MODES:
if TEXT_RE.match(mode):
raise ValueError(
"Text mode '{}' not supported: "
"files must be opened in binary mode".format(mode))
new_mode = FILE_MODES.get(mode)
if new_mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
mode = new_mode
return mode
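# For illustration (not part of the original module): _normalize_fits_mode
# maps OS-level modes through FILE_MODES, e.g. 'rb' -> 'readonly' and
# 'ab+' -> 'append'; None and names already in IO_FITS_MODES pass through
# unchanged, while text modes such as 'r+' or 'wt' raise ValueError.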
class _File:
"""
Represents a FITS file on disk (or in some other file-like object).
"""
def __init__(self, fileobj=None, mode=None, memmap=None, overwrite=False,
cache=True):
self.strict_memmap = bool(memmap)
memmap = True if memmap is None else memmap
self._file = None
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
self.compression = None
self.readonly = False
self.writeonly = False
# Should the object be closed on error: see
# https://github.com/astropy/astropy/issues/6168
self.close_on_error = False
# Holds mmap instance for files that use mmap
self._mmap = None
if fileobj is None:
self.simulateonly = True
return
else:
self.simulateonly = False
if isinstance(fileobj, os.PathLike):
fileobj = os.fspath(fileobj)
if mode is not None and mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
if isfile(fileobj):
objmode = _normalize_fits_mode(fileobj_mode(fileobj))
if mode is not None and mode != objmode:
raise ValueError(
"Requested FITS mode '{}' not compatible with open file "
"handle mode '{}'".format(mode, objmode))
mode = objmode
if mode is None:
mode = 'readonly'
# Handle raw URLs
if (isinstance(fileobj, (str, bytes)) and
mode not in ('ostream', 'append', 'update') and _is_url(fileobj)):
self.name = download_file(fileobj, cache=cache)
# Handle responses from URL requests that have already been opened
elif isinstance(fileobj, http.client.HTTPResponse):
if mode in ('ostream', 'append', 'update'):
raise ValueError(
f"Mode {mode} not supported for HTTPResponse")
fileobj = io.BytesIO(fileobj.read())
else:
self.name = fileobj_name(fileobj)
self.mode = mode
        # Assume the underlying fileobj is an actual file object, not merely
        # a file-like one; _open_filelike sets this to True when needed
self.file_like = False
# Initialize the internal self._file object
if isfile(fileobj):
self._open_fileobj(fileobj, mode, overwrite)
elif isinstance(fileobj, (str, bytes)):
self._open_filename(fileobj, mode, overwrite)
else:
self._open_filelike(fileobj, mode, overwrite)
self.fileobj_mode = fileobj_mode(self._file)
if isinstance(fileobj, gzip.GzipFile):
self.compression = 'gzip'
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
self.compression = 'zip'
elif _is_bz2file(fileobj):
self.compression = 'bzip2'
if (mode in ('readonly', 'copyonwrite', 'denywrite') or
(self.compression and mode == 'update')):
self.readonly = True
elif (mode == 'ostream' or
(self.compression and mode == 'append')):
self.writeonly = True
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if (mode == 'ostream' or self.compression or
not hasattr(self._file, 'seek')):
            # For an output stream, start with a truncated file.
            # For compressed files we can't really guess at the size.
self.size = 0
else:
pos = self._file.tell()
self._file.seek(0, 2)
self.size = self._file.tell()
self._file.seek(pos)
if self.memmap:
if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._mmap_available:
# Test mmap.flush--see
# https://github.com/astropy/astropy/issues/968
self.memmap = False
def __repr__(self):
return f'<{self.__module__}.{self.__class__.__name__} {self._file}>'
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self._file)
def read(self, size=None):
if not hasattr(self._file, 'read'):
raise EOFError
try:
return self._file.read(size)
except OSError:
# On some versions of Python, it appears, GzipFile will raise an
# OSError if you try to read past its end (as opposed to just
# returning '')
if self.compression == 'gzip':
return ''
raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self._file, 'read'):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError(f'size {size} not a multiple of {dtype}')
if isinstance(shape, int):
shape = (shape,)
if not (size or shape):
warnings.warn('No size or shape given to readarray(); assuming a '
'shape of (1,)', AstropyUserWarning)
shape = (1,)
if size and not shape:
shape = (size // dtype.itemsize,)
if size and shape:
actualsize = np.prod(shape) * dtype.itemsize
if actualsize > size:
raise ValueError('size {} is too few bytes for a {} array of '
'{}'.format(size, shape, dtype))
elif actualsize < size:
raise ValueError('size {} is too many bytes for a {} array of '
'{}'.format(size, shape, dtype))
filepos = self._file.tell()
try:
if self.memmap:
if self._mmap is None:
                    # Instantiate the mmap with the file offset at 0 (so we
                    # can return slices of it offset anywhere else into the
                    # file)
access_mode = MEMMAP_MODES[self.mode]
# For reasons unknown the file needs to point to (near)
# the beginning or end of the file. No idea how close to
# the beginning or end.
# If I had to guess there is some bug in the mmap module
                    # of CPython or perhaps in Microsoft's underlying code
# for generating the mmap.
self._file.seek(0, 0)
# This would also work:
# self._file.seek(0, 2) # moves to the end
try:
self._mmap = mmap.mmap(self._file.fileno(), 0,
access=access_mode,
offset=0)
except OSError as exc:
# NOTE: mode='readonly' results in the memory-mapping
# using the ACCESS_COPY mode in mmap so that users can
# modify arrays. However, on some systems, the OS raises
# a '[Errno 12] Cannot allocate memory' OSError if the
# address space is smaller than the file. The solution
# is to open the file in mode='denywrite', which at
# least allows the file to be opened even if the
# resulting arrays will be truly read-only.
if exc.errno == errno.ENOMEM and self.mode == 'readonly':
warnings.warn("Could not memory map array with "
"mode='readonly', falling back to "
"mode='denywrite', which means that "
"the array will be read-only",
AstropyUserWarning)
self._mmap = mmap.mmap(self._file.fileno(), 0,
access=MEMMAP_MODES['denywrite'],
offset=0)
else:
raise
return np.ndarray(shape=shape, dtype=dtype, offset=offset,
buffer=self._mmap)
else:
count = reduce(operator.mul, shape)
self._file.seek(offset)
data = _array_from_file(self._file, dtype, count)
data.shape = shape
return data
finally:
# Make sure we leave the file in the position we found it; on
# some platforms (e.g. Windows) mmaping a file handle can also
# reset its file pointer.
# Also for Windows when using mmap seek() may return weird
# negative values, which is fixed by calling tell() before.
self._file.tell()
self._file.seek(filepos)
def writable(self):
if self.readonly:
return False
return iswritable(self._file)
def write(self, string):
if self.simulateonly:
return
if hasattr(self._file, 'write'):
_write_string(self._file, string)
def writearray(self, array):
"""
Similar to file.write(), but writes a numpy array instead of a string.
Also like file.write(), a flush() or close() may be needed before
the file on disk reflects the data written.
"""
if self.simulateonly:
return
if hasattr(self._file, 'write'):
_array_to_file(array, self._file)
def flush(self):
if self.simulateonly:
return
if hasattr(self._file, 'flush'):
self._file.flush()
def seek(self, offset, whence=0):
if not hasattr(self._file, 'seek'):
return
self._file.seek(offset, whence)
pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn('File may have been truncated: actual file length '
'({}) is smaller than the expected size ({})'
.format(self.size, pos), AstropyUserWarning)
def tell(self):
if self.simulateonly:
raise OSError
if not hasattr(self._file, 'tell'):
raise EOFError
return self._file.tell()
def truncate(self, size=None):
if hasattr(self._file, 'truncate'):
self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self._file, 'close'):
self._file.close()
self._maybe_close_mmap()
        # Set self._mmap to None anyway since no new .data attributes can be
        # loaded after the file is closed
self._mmap = None
self.closed = True
self.close_on_error = False
def _maybe_close_mmap(self, refcount_delta=0):
"""
When mmap is in use these objects hold a reference to the mmap of the
file (so there is only one, shared by all HDUs that reference this
file).
This will close the mmap if there are no arrays referencing it.
"""
if (self._mmap is not None and
sys.getrefcount(self._mmap) == 2 + refcount_delta):
self._mmap.close()
self._mmap = None
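    # Note on the refcount test above (illustrative): sys.getrefcount adds a
    # temporary reference for its own argument and self._mmap holds another,
    # so a count of exactly 2 (+ refcount_delta for references the caller
    # knowingly holds) means no ndarray still uses the mmap as its buffer.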
def _overwrite_existing(self, overwrite, fileobj, closed):
"""Overwrite an existing file if ``overwrite`` is ``True``, otherwise
raise an OSError. The exact behavior of this method depends on the
_File object state and is only meant for use within the ``_open_*``
internal methods.
"""
# The file will be overwritten...
if ((self.file_like and hasattr(fileobj, 'len') and fileobj.len > 0) or
(os.path.exists(self.name) and os.path.getsize(self.name) != 0)):
if overwrite:
if self.file_like and hasattr(fileobj, 'truncate'):
fileobj.truncate(0)
else:
if not closed:
fileobj.close()
os.remove(self.name)
else:
raise OSError(NOT_OVERWRITING_MSG.format(self.name))
def _try_read_compressed(self, obj_or_name, magic, mode, ext=''):
"""Attempt to determine if the given file is compressed"""
is_ostream = mode == 'ostream'
if (is_ostream and ext == '.gz') or magic.startswith(GZIP_MAGIC):
            if mode == 'append':
                raise OSError("'append' mode is not supported with gzip "
                              "files. Use 'update' mode instead")
# Handle gzip files
kwargs = dict(mode=IO_FITS_MODES[mode])
if isinstance(obj_or_name, str):
kwargs['filename'] = obj_or_name
else:
kwargs['fileobj'] = obj_or_name
self._file = gzip.GzipFile(**kwargs)
self.compression = 'gzip'
elif (is_ostream and ext == '.zip') or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
self.compression = 'zip'
elif (is_ostream and ext == '.bz2') or magic.startswith(BZIP2_MAGIC):
# Handle bzip2 files
if mode in ['update', 'append']:
raise OSError("update and append modes are not supported "
"with bzip2 files")
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module.")
# bzip2 only supports 'w' and 'r' modes
bzip2_mode = 'w' if is_ostream else 'r'
self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode)
self.compression = 'bzip2'
return self.compression is not None
def _open_fileobj(self, fileobj, mode, overwrite):
"""Open a FITS file from a file object (including compressed files)."""
closed = fileobj_closed(fileobj)
# FIXME: this variable was unused, check if it was useful
# fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode]
if mode == 'ostream':
self._overwrite_existing(overwrite, fileobj, closed)
if not closed:
self._file = fileobj
elif isfile(fileobj):
self._file = open(self.name, IO_FITS_MODES[mode])
# Attempt to determine if the file represented by the open file object
# is compressed
try:
# We need to account for the possibility that the underlying file
# handle may have been opened with either 'ab' or 'ab+', which
# means that the current file position is at the end of the file.
if mode in ['ostream', 'append']:
self._file.seek(0)
magic = self._file.read(4)
# No matter whether the underlying file was opened with 'ab' or
# 'ab+', we need to return to the beginning of the file in order
# to properly process the FITS header (and handle the possibility
# of a compressed file).
self._file.seek(0)
except OSError:
return
self._try_read_compressed(fileobj, magic, mode)
def _open_filelike(self, fileobj, mode, overwrite):
"""Open a FITS file from a file-like object, i.e. one that has
read and/or write methods.
"""
self.file_like = True
self._file = fileobj
if fileobj_closed(fileobj):
raise OSError("Cannot read from/write to a closed file-like "
"object ({!r}).".format(fileobj))
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
# We can bypass any additional checks at this point since now
# self._file points to the temp file extracted from the zip
return
        # If there are no seek or tell methods then set the mode to
        # output streaming.
if (not hasattr(self._file, 'seek') or
not hasattr(self._file, 'tell')):
self.mode = mode = 'ostream'
if mode == 'ostream':
self._overwrite_existing(overwrite, fileobj, False)
# Any "writeable" mode requires a write() method on the file object
if (self.mode in ('update', 'append', 'ostream') and
not hasattr(self._file, 'write')):
raise OSError("File-like object does not have a 'write' "
"method, required for mode '{}'.".format(self.mode))
# Any mode except for 'ostream' requires readability
if self.mode != 'ostream' and not hasattr(self._file, 'read'):
raise OSError("File-like object does not have a 'read' "
"method, required for mode {!r}.".format(self.mode))
def _open_filename(self, filename, mode, overwrite):
"""Open a FITS file from a filename string."""
if mode == 'ostream':
self._overwrite_existing(overwrite, None, True)
if os.path.exists(self.name):
with open(self.name, 'rb') as f:
magic = f.read(4)
else:
magic = b''
ext = os.path.splitext(self.name)[1]
if not self._try_read_compressed(self.name, magic, mode, ext=ext):
self._file = open(self.name, IO_FITS_MODES[mode])
self.close_on_error = True
# Make certain we're back at the beginning of the file
# BZ2File does not support seek when the file is open for writing, but
# when opening a file for write, bz2.BZ2File always truncates anyway.
if not (_is_bz2file(self._file) and mode == 'ostream'):
self._file.seek(0)
@classproperty(lazy=True)
def _mmap_available(cls):
"""Tests that mmap, and specifically mmap.flush works. This may
be the case on some uncommon platforms (see
https://github.com/astropy/astropy/issues/968).
If mmap.flush is found not to work, ``self.memmap = False`` is
set and a warning is issued.
"""
tmpfd, tmpname = tempfile.mkstemp()
try:
# Windows does not allow mappings on empty files
os.write(tmpfd, b' ')
os.fsync(tmpfd)
try:
mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
except OSError as exc:
warnings.warn('Failed to create mmap: {}; mmap use will be '
'disabled'.format(str(exc)), AstropyUserWarning)
del exc
return False
try:
mm.flush()
except OSError:
warnings.warn('mmap.flush is unavailable on this platform; '
'using mmap in writeable mode will be disabled',
AstropyUserWarning)
return False
finally:
mm.close()
finally:
os.close(tmpfd)
os.remove(tmpname)
return True
def _open_zipfile(self, fileobj, mode):
"""Limited support for zipfile.ZipFile objects containing a single
a file. Allows reading only for now by extracting the file to a
tempfile.
"""
if mode in ('update', 'append'):
raise OSError(
"Writing to zipped fits files is not currently "
"supported")
if not isinstance(fileobj, zipfile.ZipFile):
zfile = zipfile.ZipFile(fileobj)
close = True
else:
zfile = fileobj
close = False
namelist = zfile.namelist()
if len(namelist) != 1:
raise OSError(
"Zip files with multiple members are not supported.")
self._file = tempfile.NamedTemporaryFile(suffix='.fits')
self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
# We just wrote the contents of the first file in the archive to a new
# temp file, which now serves as our underlying file object. So it's
# necessary to reset the position back to the beginning
self._file.seek(0)
|
2351ec827014ffff83ae7491dcdd6cf6c7c615a3ebb5eb70016c7336bca34356 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import warnings
from copy import deepcopy
import numpy as np
from astropy.io import registry as io_registry
from astropy import units as u
from astropy.table import Table, serialize, meta, Column, MaskedColumn
from astropy.time import Time
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import (AstropyUserWarning,
AstropyDeprecationWarning)
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import HDUList, TableHDU, BinTableHDU, GroupsHDU, append as fits_append
from .column import KEYWORD_NAMES, _fortran_to_python_format
from .convenience import table_to_hdu
from .hdu.hdulist import fitsopen as fits_open, FITS_SIGNATURE
from .util import first
# Keywords to remove for all tables that are read in
REMOVE_KEYWORDS = ['XTENSION', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2',
'PCOUNT', 'GCOUNT', 'TFIELDS', 'THEAP']
# Column-specific keywords regex
COLUMN_KEYWORD_REGEXP = '(' + '|'.join(KEYWORD_NAMES) + ')[0-9]+'
def is_column_keyword(keyword):
return re.match(COLUMN_KEYWORD_REGEXP, keyword) is not None
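# Illustrative examples (not part of the original module):
#
#     is_column_keyword('TTYPE1')   -> True   (column name keyword)
#     is_column_keyword('TUNIT12')  -> True   (column unit keyword)
#     is_column_keyword('NAXIS1')   -> False  (structural; removed separately)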
def is_fits(origin, filepath, fileobj, *args, **kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if fileobj is not None:
pos = fileobj.tell()
sig = fileobj.read(30)
fileobj.seek(pos)
return sig == FITS_SIGNATURE
elif filepath is not None:
if filepath.lower().endswith(('.fits', '.fits.gz', '.fit', '.fit.gz',
'.fts', '.fts.gz')):
return True
elif isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU)):
return True
else:
return False
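# For illustration (hypothetical snippet, not used by the registry): the same
# signature check can be performed directly on a path, assuming the file holds
# at least 30 bytes:
#
#     with open(path, 'rb') as f:
#         looks_like_fits = f.read(30) == FITS_SIGNATURE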
def _decode_mixins(tbl):
"""Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into
the corresponding table with mixin columns (as appropriate).
"""
# If available read in __serialized_columns__ meta info which is stored
# in FITS COMMENTS between two sentinels.
try:
i0 = tbl.meta['comments'].index('--BEGIN-ASTROPY-SERIALIZED-COLUMNS--')
i1 = tbl.meta['comments'].index('--END-ASTROPY-SERIALIZED-COLUMNS--')
except (ValueError, KeyError):
return tbl
# The YAML data are split into COMMENT cards, with lines longer than 70
# characters being split with a continuation character \ (backslash).
# Strip the backslashes and join together.
continuation_line = False
lines = []
for line in tbl.meta['comments'][i0 + 1:i1]:
if continuation_line:
lines[-1] = lines[-1] + line[:70]
else:
lines.append(line[:70])
continuation_line = len(line) == 71
del tbl.meta['comments'][i0:i1 + 1]
if not tbl.meta['comments']:
del tbl.meta['comments']
info = meta.get_header_from_yaml(lines)
# Add serialized column information to table meta for use in constructing mixins
tbl.meta['__serialized_columns__'] = info['meta']['__serialized_columns__']
# Use the `datatype` attribute info to update column attributes that are
# NOT already handled via standard FITS column keys (name, dtype, unit).
for col in info['datatype']:
for attr in ['description', 'meta']:
if attr in col:
setattr(tbl[col['name']].info, attr, col[attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
tbl = serialize._construct_mixins_from_columns(tbl)
return tbl
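# A minimal sketch (helper name hypothetical) of the 70-character COMMENT
# continuation scheme handled above: encoded chunks carry a trailing
# backslash (71 characters total) and are rejoined on read.
def _demo_comment_continuation():
    long_line = 'x' * 150
    # Split into chunks exactly as _encode_mixins does below
    idxs = list(range(0, len(long_line) + 70, 70))
    chunks = [long_line[i0:i1] + '\\' for i0, i1 in zip(idxs[:-1], idxs[1:])]
    chunks[-1] = chunks[-1][:-1]
    # Rejoin exactly as the loop above does
    joined = []
    continuation = False
    for line in chunks:
        if continuation:
            joined[-1] = joined[-1] + line[:70]
        else:
            joined.append(line[:70])
        continuation = len(line) == 71
    assert joined == [long_line]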
def read_table_fits(input, hdu=None, astropy_native=False, memmap=False,
character_as_bytes=True, unit_parse_strict='warn',
mask_invalid=True):
"""
Read a Table object from a FITS file
If the ``astropy_native`` argument is ``True``, then input FITS columns
which are representations of an astropy core object will be converted to
that class and stored in the ``Table`` as "mixin columns". Currently this
is limited to FITS columns which adhere to the FITS Time standard, in which
case they will be converted to a `~astropy.time.Time` column in the output
table.
Parameters
----------
input : str or file-like or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
astropy_native : bool, optional
Read in FITS columns as native astropy objects where possible instead
of standard Table Column objects. Default is False.
memmap : bool, optional
Whether to use memory mapping, which accesses data on disk as needed. If
you are only accessing part of the data, this is often more efficient.
If you want to access all the values in the table, and you are able to
fit the table in memory, you may be better off leaving memory mapping
off. However, if your table would not fit in memory, you should set this
to `True`.
When set to `True`, ``mask_invalid`` is set to `False` since the
masking would require loading the full data array.
character_as_bytes : bool, optional
If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
and are converted on-the-fly to unicode strings when accessing
individual elements. If you need to use Numpy unicode arrays (dtype
``U``) internally, you should set this to `False`, but note that this
will use more memory. If set to `False`, string columns will not be
memory-mapped even if ``memmap`` is `True`.
unit_parse_strict : str, optional
Behaviour when encountering invalid column units in the FITS header.
Default is "warn", which will emit a ``UnitsWarning`` and create a
:class:`~astropy.units.core.UnrecognizedUnit`.
Values are the ones allowed by the ``parse_strict`` argument of
:class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.
mask_invalid : bool, optional
By default the code masks NaNs in float columns and empty strings in
string columns. Set this parameter to `False` to avoid the performance
penalty of doing this masking step. The masking is always deactivated
when using ``memmap=True`` (see above).
"""
if isinstance(input, HDUList):
# Parse all table objects
tables = dict()
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn("hdu= was not specified but multiple tables"
" are present, reading in first available"
f" table (hdu={first(tables)})",
AstropyUserWarning)
hdu = first(tables)
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError(f"No table found in hdu={hdu}")
elif len(tables) == 1:
if hdu is not None:
msg = None
try:
hdi = input.index_of(hdu)
except KeyError:
msg = f"Specified hdu={hdu} not found"
else:
if hdi >= len(input):
msg = f"Specified hdu={hdu} not found"
elif hdi not in tables:
msg = f"No table found in specified hdu={hdu}"
if msg is not None:
warnings.warn(f"{msg}, reading in first available table "
f"(hdu={first(tables)}) instead. This will"
" result in an error in future versions!",
AstropyDeprecationWarning)
table = tables[first(tables)]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
if memmap:
# using memmap is not compatible with masking invalid value by
# default so we deactivate the masking
mask_invalid = False
hdulist = fits_open(input, character_as_bytes=character_as_bytes,
memmap=memmap)
try:
return read_table_fits(
hdulist, hdu=hdu,
astropy_native=astropy_native,
unit_parse_strict=unit_parse_strict,
mask_invalid=mask_invalid,
)
finally:
hdulist.close()
# In the loop below we access the data using data[col.name] rather than
# col.array to make sure that the data is scaled correctly if needed.
data = table.data
columns = []
for col in data.columns:
# Check if column is masked. Here, we make a guess based on the
# presence of FITS mask values. For integer columns, this is simply
# the null header, for float and complex, the presence of NaN, and for
# string, empty strings.
# Since Multi-element columns with dtypes such as '2f8' have a subdtype,
# we should look up the type of column on that.
masked = mask = False
coltype = (col.dtype.subdtype[0].type if col.dtype.subdtype
else col.dtype.type)
if col.null is not None:
mask = data[col.name] == col.null
# Return a MaskedColumn even if no elements are masked so
# we roundtrip better.
masked = True
elif mask_invalid and issubclass(coltype, np.inexact):
mask = np.isnan(data[col.name])
elif mask_invalid and issubclass(coltype, np.character):
mask = col.array == b''
if masked or np.any(mask):
column = MaskedColumn(data=data[col.name], name=col.name,
mask=mask, copy=False)
else:
column = Column(data=data[col.name], name=col.name, copy=False)
# Copy over units
if col.unit is not None:
column.unit = u.Unit(col.unit, format='fits', parse_strict=unit_parse_strict)
# Copy over display format
if col.disp is not None:
column.format = _fortran_to_python_format(col.disp)
columns.append(column)
# Create Table object
t = Table(columns, copy=False)
# TODO: deal properly with unsigned integers
hdr = table.header
if astropy_native:
# Avoid circular imports, and also only import if necessary.
from .fitstime import fits_to_time
hdr = fits_to_time(hdr, t)
for key, value, comment in hdr.cards:
if key in ['COMMENT', 'HISTORY']:
# Convert to io.ascii format
if key == 'COMMENT':
key = 'comments'
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif is_column_keyword(key) or key in REMOVE_KEYWORDS:
pass
else:
t.meta[key] = value
# TODO: implement masking
# Decode any mixin columns that have been stored as standard Columns.
t = _decode_mixins(t)
return t
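# An illustrative sketch (file name arbitrary, demo name hypothetical):
# round-trip a small table and show the default NaN masking performed by
# the column loop above, plus how memmap=True disables it.
def _demo_read_table_fits():
    import os
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'demo.fits')
    Table({'a': [1, 2], 'b': [1.5, np.nan]}).write(path, format='fits')
    t = read_table_fits(path)
    assert bool(t['b'].mask[1])            # NaN masked (mask_invalid=True)
    t2 = read_table_fits(path, memmap=True)  # memmap deactivates masking
    assert not isinstance(t2['b'], MaskedColumn)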
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
# Determine if information will be lost without serializing meta. This is hardcoded
# to the set difference between column info attributes and what FITS can store
# natively (name, dtype, unit). See _get_col_attributes() in table/meta.py for where
# this comes from.
info_lost = any(any(getattr(col.info, attr, None) not in (None, {})
for attr in ('description', 'meta'))
for col in tbl.itercols())
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table. This ignores
# Time-subclass columns and leaves them in the table so that the downstream
# FITS Time handling does the right thing.
with serialize_context_as('fits'):
encode_tbl = serialize.represent_mixins_as_columns(
tbl, exclude_classes=(Time,))
# If the encoded table is unchanged then there were no mixins. But if there
# is column metadata (format, description, meta) that would be lost, then
# still go through the serialized columns machinery.
if encode_tbl is tbl and not info_lost:
return tbl
# Copy the meta dict if it was not copied by represent_mixins_as_columns.
# We will modify .meta['comments'] below and we do not want to see these
# comments in the input table.
if encode_tbl is tbl:
meta_copy = deepcopy(tbl.meta)
encode_tbl = Table(tbl.columns, meta=meta_copy, copy=False)
# Get the YAML serialization of information describing the table columns.
# This is re-using ECSV code that combines existing table.meta with
# the extra __serialized_columns__ key. For FITS the table.meta is handled
# by the native FITS connect code, so don't include that in the YAML
# output.
ser_col = '__serialized_columns__'
# encode_tbl might not have a __serialized_columns__ key if there were no mixins,
# but machinery below expects it to be available, so just make an empty dict.
encode_tbl.meta.setdefault(ser_col, {})
tbl_meta_copy = encode_tbl.meta.copy()
try:
encode_tbl.meta = {ser_col: encode_tbl.meta[ser_col]}
meta_yaml_lines = meta.get_yaml_from_table(encode_tbl)
finally:
encode_tbl.meta = tbl_meta_copy
del encode_tbl.meta[ser_col]
if 'comments' not in encode_tbl.meta:
encode_tbl.meta['comments'] = []
encode_tbl.meta['comments'].append('--BEGIN-ASTROPY-SERIALIZED-COLUMNS--')
for line in meta_yaml_lines:
if len(line) == 0:
lines = ['']
else:
# Split line into 70 character chunks for COMMENT cards
idxs = list(range(0, len(line) + 70, 70))
lines = [line[i0:i1] + '\\' for i0, i1 in zip(idxs[:-1], idxs[1:])]
lines[-1] = lines[-1][:-1]
encode_tbl.meta['comments'].extend(lines)
encode_tbl.meta['comments'].append('--END-ASTROPY-SERIALIZED-COLUMNS--')
return encode_tbl
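# A minimal sketch (demo name hypothetical): column metadata that FITS
# cannot store natively (here a description) triggers the
# serialized-columns path above, leaving sentinel COMMENT entries, while
# the input table is left untouched.
def _demo_encode_mixins():
    t = Table({'a': [1, 2]})
    t['a'].description = 'demo column'
    enc = _encode_mixins(t)
    assert '--BEGIN-ASTROPY-SERIALIZED-COLUMNS--' in enc.meta['comments']
    assert 'comments' not in t.meta   # original meta is not modified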
def write_table_fits(input, output, overwrite=False, append=False):
"""
Write a Table object to a FITS file
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
append : bool
Whether to append the table to an existing file
"""
# Encode any mixin columns into standard Columns.
input = _encode_mixins(input)
table_hdu = table_to_hdu(input, character_as_bytes=True)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
elif not append:
raise OSError(NOT_OVERWRITING_MSG.format(output))
if append:
# verify=False stops it reading and checking the existing file.
fits_append(output, table_hdu.data, table_hdu.header, verify=False)
else:
table_hdu.writeto(output)
io_registry.register_reader('fits', Table, read_table_fits)
io_registry.register_writer('fits', Table, write_table_fits)
io_registry.register_identifier('fits', Table, is_fits)
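# An illustrative sketch of the writer semantics above (path arbitrary,
# demo name hypothetical): a second plain write raises unless
# overwrite=True, while append=True adds another table HDU.
def _demo_write_table_fits():
    import os
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'demo.fits')
    t = Table({'a': [1, 2]})
    write_table_fits(t, path)
    write_table_fits(t, path, overwrite=True)   # replace the existing file
    write_table_fits(t, path, append=True)      # add a second table HDU
    with fits_open(path) as hdul:
        assert len(hdul) == 3                   # primary + two table HDUs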
|
35926f6103fa405fc4e3a3fcd245653dd002e9ab3efd22034096e477af146e8d | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import itertools
import numbers
import re
import warnings
from .card import Card, _pad, KEYWORD_LENGTH, UNDEFINED
from .file import _File
from .util import (encode_ascii, decode_ascii, fileobj_closed,
fileobj_is_binary, path_like)
from ._utils import parse_header
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
BLOCK_SIZE = 2880 # the FITS block size
# This regular expression can match a *valid* END card which just consists of
# the string 'END' followed by all spaces, or an *invalid* end card which
# consists of END, followed by any character that is *not* a valid character
# for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which
# starts with 'END' but is not 'END'), followed by any arbitrary bytes. An
# invalid end card may also consist of just 'END' with no trailing bytes.
HEADER_END_RE = re.compile(encode_ascii(
r'(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])'))
# According to the FITS standard the only characters that may appear in a
# header record are the restricted ASCII chars from 0x20 through 0x7E.
VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F)))
END_CARD = 'END' + ' ' * 77
__doctest_skip__ = ['Header', 'Header.comments', 'Header.fromtextfile',
'Header.totextfile', 'Header.set', 'Header.update']
class Header:
"""
FITS header class. This class exposes both a dict-like interface and a
list-like interface to FITS headers.
The header may be indexed by keyword and, like a dict, the associated value
will be returned. When the header contains cards with duplicate keywords,
only the value of the first card with the given keyword will be returned.
It is also possible to use a 2-tuple as the index in the form (keyword,
n)--this returns the n-th value with that keyword, in the case where there
are duplicate keywords.
For example::
>>> header['NAXIS']
0
>>> header[('FOO', 1)] # Return the value of the second FOO keyword
'foo'
The header may also be indexed by card number::
>>> header[0] # Return the value of the first card in the header
'T'
Commentary keywords such as HISTORY and COMMENT are special cases: When
indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all
the HISTORY/COMMENT values is returned::
>>> header['HISTORY']
This is the first history entry in this header.
This is the second history entry in this header.
...
See the Astropy documentation for more details on working with headers.
Notes
-----
Although FITS keywords must be exclusively upper case, retrieving an item
in a `Header` object is case insensitive.
"""
def __init__(self, cards=[], copy=False):
"""
Construct a `Header` from an iterable and/or text file.
Parameters
----------
cards : list of `Card`, optional
The cards to initialize the header with. Also allowed are other
`Header` (or `dict`-like) objects.
.. versionchanged:: 1.2
Allowed ``cards`` to be a `dict`-like object.
copy : bool, optional
If ``True`` copies the ``cards`` if they were another `Header`
instance.
Default is ``False``.
.. versionadded:: 1.3
"""
self.clear()
if isinstance(cards, Header):
if copy:
cards = cards.copy()
cards = cards.cards
elif isinstance(cards, dict):
cards = cards.items()
for card in cards:
self.append(card, end=True)
self._modified = False
def __len__(self):
return len(self._cards)
def __iter__(self):
for card in self._cards:
yield card.keyword
def __contains__(self, keyword):
if keyword in self._keyword_indices or keyword in self._rvkc_indices:
# For the most common case (single, standard form keyword lookup)
# this will work and is an O(1) check. If it fails that doesn't
# guarantee absence, just that we have to perform the full set of
# checks in self._cardindex
return True
try:
self._cardindex(keyword)
except (KeyError, IndexError):
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
return self.__class__([copy.copy(c) for c in self._cards[key]])
elif self._haswildcard(key):
return self.__class__([copy.copy(self._cards[idx])
for idx in self._wildcardmatch(key)])
elif isinstance(key, str):
key = key.strip()
if key.upper() in Card._commentary_keywords:
key = key.upper()
# Special case for commentary cards
return _HeaderCommentaryCards(self, key)
if isinstance(key, tuple):
keyword = key[0]
else:
keyword = key
card = self._cards[self._cardindex(key)]
if card.field_specifier is not None and keyword == card.rawkeyword:
# This is RVKC; if only the top-level keyword was specified return
# the raw value, not the parsed out float value
return card.rawvalue
value = card.value
if value == UNDEFINED:
return None
return value
def __setitem__(self, key, value):
if self._set_slice(key, value, self):
return
if isinstance(value, tuple):
if len(value) > 2:
raise ValueError(
'A Header item may be set with either a scalar value, '
'a 1-tuple containing a scalar value, or a 2-tuple '
'containing a scalar value and comment string.')
if len(value) == 1:
value, comment = value[0], None
if value is None:
value = UNDEFINED
elif len(value) == 2:
value, comment = value
if value is None:
value = UNDEFINED
if comment is None:
comment = ''
else:
comment = None
card = None
if isinstance(key, numbers.Integral):
card = self._cards[key]
elif isinstance(key, tuple):
card = self._cards[self._cardindex(key)]
if value is None:
value = UNDEFINED
if card:
card.value = value
if comment is not None:
card.comment = comment
if card._modified:
self._modified = True
else:
# If we get an IndexError that should be raised; we don't allow
# assignment to non-existing indices
self._update((key, value, comment))
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# This is very inefficient but it's not a commonly used feature.
# If someone out there complains that they make heavy use of slice
# deletions and it's too slow, well, we can worry about it then
# [the solution is not too complicated--it would be wait 'til all
# the cards are deleted before updating _keyword_indices rather
# than updating it once for each card that gets deleted]
if isinstance(key, slice):
indices = range(*key.indices(len(self)))
# If the slice step is backwards we want to reverse it, because
# it will be reversed in a few lines...
if key.step and key.step < 0:
indices = reversed(indices)
else:
indices = self._wildcardmatch(key)
for idx in reversed(indices):
del self[idx]
return
elif isinstance(key, str):
# delete ALL cards with the same keyword name
key = Card.normalize_keyword(key)
indices = self._keyword_indices
if key not in self._keyword_indices:
indices = self._rvkc_indices
if key not in indices:
# if keyword is not present raise KeyError.
# To delete keyword without caring if they were present,
# Header.remove(Keyword) can be used with optional argument ignore_missing as True
raise KeyError(f"Keyword '{key}' not found.")
for idx in reversed(indices[key]):
# indices[key] is modified as cards are deleted below; deleting in
# reverse order keeps the remaining positions valid
del self[idx]
return
idx = self._cardindex(key)
card = self._cards[idx]
keyword = card.keyword
del self._cards[idx]
keyword = Card.normalize_keyword(keyword)
indices = self._keyword_indices[keyword]
indices.remove(idx)
if not indices:
del self._keyword_indices[keyword]
# Also update RVKC indices if necessary :/
if card.field_specifier is not None:
indices = self._rvkc_indices[card.rawkeyword]
indices.remove(idx)
if not indices:
del self._rvkc_indices[card.rawkeyword]
# We also need to update all other indices
self._updateindices(idx, increment=False)
self._modified = True
def __repr__(self):
return self.tostring(sep='\n', endcard=False, padding=False)
def __str__(self):
return self.tostring()
def __eq__(self, other):
"""
Two Headers are equal only if they have the exact same string
representation.
"""
return str(self) == str(other)
def __add__(self, other):
temp = self.copy(strip=False)
temp.extend(other)
return temp
def __iadd__(self, other):
self.extend(other)
return self
def _ipython_key_completions_(self):
return self.__iter__()
@property
def cards(self):
"""
The underlying physical cards that make up this Header; it can be
looked at, but it should not be modified directly.
"""
return _CardAccessor(self)
@property
def comments(self):
"""
View the comments associated with each keyword, if any.
For example, to see the comment on the NAXIS keyword:
>>> header.comments['NAXIS']
number of data axes
Comments can also be updated through this interface:
>>> header.comments['NAXIS'] = 'Number of data axes'
"""
return _HeaderComments(self)
@property
def _modified(self):
"""
Whether or not the header has been modified; this is a property so that
it can also check each card for modifications--cards may have been
modified directly without the header that contains them otherwise knowing.
"""
modified_cards = any(c._modified for c in self._cards)
if modified_cards:
# If any cards were modified then by definition the header was
# modified
self.__dict__['_modified'] = True
return self.__dict__['_modified']
@_modified.setter
def _modified(self, val):
self.__dict__['_modified'] = val
@classmethod
def fromstring(cls, data, sep=''):
"""
Creates an HDU header from a byte string containing the entire header
data.
Parameters
----------
data : str or bytes
String or bytes containing the entire header. In the case of bytes
they will be decoded using latin-1 (only plain ASCII characters are
allowed in FITS headers but latin-1 allows us to retain any invalid
bytes that might appear in malformatted FITS files).
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file). In general this is only used in cases where a header was
printed as text (e.g. with newlines after each card) and you want
to create a new `Header` from it by copy/pasting.
Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'SIMPLE': True})
>>> Header.fromstring(hdr.tostring()) == hdr
True
If you want to create a `Header` from printed text it's not necessary
to have the exact binary structure as it would appear in a FITS file,
with the full 80 byte card length. Rather, each "card" can end in a
newline and does not have to be padded out to a full card length as
long as it "looks like" a FITS header:
>>> hdr = Header.fromstring(\"\"\"\\
... SIMPLE = T / conforms to FITS standard
... BITPIX = 8 / array data type
... NAXIS = 0 / number of array dimensions
... EXTEND = T
... \"\"\", sep='\\n')
>>> hdr['SIMPLE']
True
>>> hdr['BITPIX']
8
>>> len(hdr)
4
Returns
-------
`Header`
A new `Header` instance.
"""
cards = []
# If the card separator contains characters that may validly appear in
# a card, the only way to unambiguously distinguish between cards is to
# require that they be Card.length long. However, if the separator
# contains non-valid characters (namely \n) the cards may be split
# immediately at the separator
require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS)
if isinstance(data, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place--accepting it here still gives us the
# opportunity to display warnings later during validation
CONTINUE = b'CONTINUE'
END = b'END'
end_card = END_CARD.encode('ascii')
sep = sep.encode('latin1')
empty = b''
else:
CONTINUE = 'CONTINUE'
END = 'END'
end_card = END_CARD
empty = ''
# Split the header into individual cards
idx = 0
image = []
while idx < len(data):
if require_full_cardlength:
end_idx = idx + Card.length
else:
try:
end_idx = data.index(sep, idx)
except ValueError:
end_idx = len(data)
next_image = data[idx:end_idx]
idx = end_idx + len(sep)
if image:
if next_image[:8] == CONTINUE:
image.append(next_image)
continue
cards.append(Card.fromstring(empty.join(image)))
if require_full_cardlength:
if next_image == end_card:
image = []
break
else:
if next_image.split(sep)[0].rstrip() == END:
image = []
break
image = [next_image]
# Add the last image that was found before the end, if any
if image:
cards.append(Card.fromstring(empty.join(image)))
return cls._fromcards(cards)
@classmethod
def fromfile(cls, fileobj, sep='', endcard=True, padding=True):
"""
Similar to :meth:`Header.fromstring`, but reads the header string from
a given file-like object or filename.
Parameters
----------
fileobj : str, file-like
A filename or an open file-like object from which a FITS header is
to be read. For open file handles the file pointer must be at the
beginning of the header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
endcard : bool, optional
If True (the default) the header must end with an END card in order
to be considered valid. If an END card is not found an
`OSError` is raised.
padding : bool, optional
If True (the default) the header will be required to be padded out
to a multiple of 2880, the FITS header block size. Otherwise any
padding, or lack thereof, is ignored.
Returns
-------
`Header`
A new `Header` instance.
"""
close_file = False
if isinstance(fileobj, path_like):
# If sep is non-empty we are trying to read a header printed to a
# text file, so open in text mode by default to support newline
# handling; if a binary-mode file object is passed in, the user is
# then on their own w.r.t. newline handling.
#
# Otherwise assume we are reading from an actual FITS file and open
# in binary mode.
if sep:
fileobj = open(fileobj, 'r', encoding='latin1')
else:
fileobj = open(fileobj, 'rb')
close_file = True
try:
is_binary = fileobj_is_binary(fileobj)
def block_iter(nbytes):
while True:
data = fileobj.read(nbytes)
if data:
yield data
else:
break
return cls._from_blocks(block_iter, is_binary, sep, endcard,
padding)[1]
finally:
if close_file:
fileobj.close()
@classmethod
def _fromcards(cls, cards):
header = cls()
for idx, card in enumerate(cards):
header._cards.append(card)
keyword = Card.normalize_keyword(card.keyword)
header._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
header._rvkc_indices[card.rawkeyword].append(idx)
header._modified = False
return header
@classmethod
def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding):
"""
The meat of `Header.fromfile`; in a separate method so that
`Header.fromfile` itself is just responsible for wrapping file
handling. Also used by `_BaseHDU.fromstring`.
``block_iter`` should be a callable which, given a block size n
(typically 2880 bytes as used by the FITS standard) returns an iterator
of byte strings of that block size.
``is_binary`` specifies whether the returned blocks are bytes or text
Returns both the entire header *string*, and the `Header` object
returned by Header.fromstring on that string.
"""
actual_block_size = _block_size(sep)
clen = Card.length + len(sep)
blocks = block_iter(actual_block_size)
# Read the first header block.
try:
block = next(blocks)
except StopIteration:
raise EOFError()
if not is_binary:
# TODO: There needs to be error handling at *this* level for
# non-ASCII characters; maybe at this stage decoding latin-1 might
# be safer
block = encode_ascii(block)
read_blocks = []
is_eof = False
end_found = False
# continue reading header blocks until END card or EOF is reached
while True:
# find the END card
end_found, block = cls._find_end_card(block, clen)
read_blocks.append(decode_ascii(block))
if end_found:
break
try:
block = next(blocks)
except StopIteration:
is_eof = True
break
if not block:
is_eof = True
break
if not is_binary:
block = encode_ascii(block)
header_str = ''.join(read_blocks)
_check_padding(header_str, actual_block_size, is_eof,
check_block_size=padding)
if not end_found and is_eof and endcard:
# TODO: Pass this error to validation framework as an ERROR,
# rather than raising an exception
raise OSError('Header missing END card.')
return header_str, cls.fromstring(header_str, sep=sep)
@classmethod
def _find_end_card(cls, block, card_len):
"""
Utility method to search a header block for the END card and handle
invalid END cards.
This method can also return a modified copy of the input header block,
in case an invalid end card needs to be sanitized.
"""
for mo in HEADER_END_RE.finditer(block):
# Ensure the END card was found, and it started on the
# boundary of a new card (see ticket #142)
if mo.start() % card_len != 0:
continue
# This must be the last header block, otherwise the
# file is malformatted
if mo.group('invalid'):
offset = mo.start()
trailing = block[offset + 3:offset + card_len - 3].rstrip()
if trailing:
trailing = repr(trailing).lstrip('ub')
# TODO: Pass this warning up to the validation framework
warnings.warn(
'Unexpected bytes trailing END keyword: {}; these '
'bytes will be replaced with spaces on write.'.format(
trailing), AstropyUserWarning)
else:
# TODO: Pass this warning up to the validation framework
warnings.warn(
'Missing padding to end of the FITS block after the '
'END keyword; additional spaces will be appended to '
'the file upon writing to pad out to {} '
'bytes.'.format(BLOCK_SIZE), AstropyUserWarning)
# Sanitize out invalid END card now that the appropriate
# warnings have been issued
block = (block[:offset] + encode_ascii(END_CARD) +
block[offset + len(END_CARD):])
return True, block
return False, block
def tostring(self, sep='', endcard=True, padding=True):
r"""
Returns a string representation of the header.
By default this uses no separator between cards, adds the END card, and
pads the string with spaces to the next multiple of 2880 bytes. That
is, it returns the header exactly as it would appear in a FITS file.
Parameters
----------
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If True (default) adds the END card to the end of the header
string
padding : bool, optional
If True (default) pads the string with spaces out to the next
multiple of 2880 characters
Returns
-------
str
A string representing a FITS header.
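Examples
--------
For instance, a single-card header pads out to one full FITS block:
>>> from astropy.io.fits import Header
>>> len(Header({'SIMPLE': True}).tostring())
2880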
"""
lines = []
for card in self._cards:
s = str(card)
# Cards with CONTINUE cards may be longer than 80 chars; so break
# them into multiple lines
while s:
lines.append(s[:Card.length])
s = s[Card.length:]
s = sep.join(lines)
if endcard:
s += sep + _pad('END')
if padding:
s += ' ' * _pad_length(len(s))
return s
def tofile(self, fileobj, sep='', endcard=True, padding=True,
overwrite=False):
r"""
Writes the header to file or file-like object.
By default this writes the header exactly as it would be written to a
FITS file, with the END card included and padding to the next multiple
of 2880 bytes. However, aspects of this may be controlled.
Parameters
----------
fileobj : path-like or file-like, optional
Either the pathname of a file, or an open file handle or file-like
object.
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If `True` (default) adds the END card to the end of the header
string
padding : bool, optional
If `True` (default) pads the string with spaces out to the next
multiple of 2880 characters
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
"""
close_file = fileobj_closed(fileobj)
if not isinstance(fileobj, _File):
fileobj = _File(fileobj, mode='ostream', overwrite=overwrite)
try:
blocks = self.tostring(sep=sep, endcard=endcard, padding=padding)
actual_block_size = _block_size(sep)
if padding and len(blocks) % actual_block_size != 0:
raise OSError(
'Header size ({}) is not a multiple of block '
'size ({}).'.format(
len(blocks) - actual_block_size + BLOCK_SIZE,
BLOCK_SIZE))
fileobj.flush()
fileobj.write(blocks.encode('ascii'))
fileobj.flush()
finally:
if close_file:
fileobj.close()
@classmethod
def fromtextfile(cls, fileobj, endcard=False):
"""
Read a header from a simple text file or file-like object.
Equivalent to::
>>> Header.fromfile(fileobj, sep='\\n', endcard=False,
... padding=False)
See Also
--------
fromfile
"""
return cls.fromfile(fileobj, sep='\n', endcard=endcard, padding=False)
def totextfile(self, fileobj, endcard=False, overwrite=False):
"""
Write the header as text to a file or a file-like object.
Equivalent to::
>>> Header.tofile(fileobj, sep='\\n', endcard=False,
... padding=False, overwrite=overwrite)
See Also
--------
tofile
"""
self.tofile(fileobj, sep='\n', endcard=endcard, padding=False,
overwrite=overwrite)
def clear(self):
"""
Remove all cards from the header.
"""
self._cards = []
self._keyword_indices = collections.defaultdict(list)
self._rvkc_indices = collections.defaultdict(list)
def copy(self, strip=False):
"""
Make a copy of the :class:`Header`.
.. versionchanged:: 1.3
`copy.copy` and `copy.deepcopy` on a `Header` will call this
method.
Parameters
----------
strip : bool, optional
If `True`, strip any headers that are specific to one of the
standard HDU types, so that this header can be used in a different
HDU.
Returns
-------
`Header`
A new :class:`Header` instance.
"""
tmp = self.__class__((copy.copy(card) for card in self._cards))
if strip:
tmp.strip()
return tmp
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
@classmethod
def fromkeys(cls, iterable, value=None):
"""
Similar to :meth:`dict.fromkeys`--creates a new `Header` from an
iterable of keywords and an optional default value.
This method is not likely to be particularly useful for creating real
world FITS headers, but it is useful for testing.
Parameters
----------
iterable
Any iterable that returns strings representing FITS keywords.
value : optional
A default value to assign to each keyword; must be a valid type for
FITS keywords.
Returns
-------
`Header`
A new `Header` instance.
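Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header.fromkeys(['A', 'B'], value=0)
>>> hdr['B']
0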
"""
d = cls()
if not isinstance(value, tuple):
value = (value,)
for key in iterable:
d.append((key,) + value)
return d
def get(self, key, default=None):
"""
Similar to :meth:`dict.get`--returns the value associated with keyword
in the header, or a default value if the keyword is not found.
Parameters
----------
key : str
A keyword that may or may not be in the header.
default : optional
A default value to return if the keyword is not found in the
header.
Returns
-------
value : str, number, complex, bool, or ``astropy.io.fits.card.Undefined``
The value associated with the given keyword, or the default value
if the keyword is not in the header.
"""
try:
return self[key]
except (KeyError, IndexError):
return default
def set(self, keyword, value=None, comment=None, before=None, after=None):
"""
Set the value and/or comment and/or position of a specified keyword.
If the keyword does not already exist in the header, a new keyword is
created in the specified position, or appended to the end of the header
if no position is specified.
This method is similar to :meth:`Header.update` prior to Astropy v0.1.
.. note::
It should be noted that ``header.set(keyword, value)`` and
``header.set(keyword, value, comment)`` are equivalent to
``header[keyword] = value`` and
``header[keyword] = (value, comment)`` respectively.
New keywords can also be inserted relative to existing keywords
using, for example::
>>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
to insert before an existing keyword, or::
>>> header.insert('NAXIS', ('NAXIS1', 4096), after=True)
to insert after an existing keyword.
The only advantage of using :meth:`Header.set` is that it
easily replaces the old usage of :meth:`Header.update` both
conceptually and in terms of function signature.
Parameters
----------
keyword : str
A header keyword
value : str, optional
The value to set for the given keyword; if None the existing value
is kept, but '' may be used to set a blank value
comment : str, optional
The comment to set for the given keyword; if None the existing
comment is kept, but ``''`` may be used to set a blank comment
before : str, int, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument ``before`` takes
precedence over ``after`` if both specified.
after : str, int, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
"""
# Create a temporary card that looks like the one being set; if the
# temporary card turns out to be a RVKC this will make it easier to
# deal with the idiosyncrasies thereof
# Don't try to make a temporary card though if the keyword looks like
# it might be a HIERARCH card or is otherwise invalid--this step is
# only for validating RVKCs.
if (len(keyword) <= KEYWORD_LENGTH and
Card._keywd_FSC_RE.match(keyword) and
keyword not in self._keyword_indices):
new_card = Card(keyword, value, comment)
new_keyword = new_card.keyword
else:
new_keyword = keyword
if (new_keyword not in Card._commentary_keywords and
new_keyword in self):
if comment is None:
comment = self.comments[keyword]
if value is None:
value = self[keyword]
self[keyword] = (value, comment)
if before is not None or after is not None:
card = self._cards[self._cardindex(keyword)]
self._relativeinsert(card, before=before, after=after,
replace=True)
elif before is not None or after is not None:
self._relativeinsert((keyword, value, comment), before=before,
after=after)
else:
self[keyword] = (value, comment)
def items(self):
"""Like :meth:`dict.items`."""
for card in self._cards:
yield card.keyword, None if card.value == UNDEFINED else card.value
def keys(self):
"""
Like :meth:`dict.keys`--iterating directly over the `Header`
instance has the same behavior.
"""
for card in self._cards:
yield card.keyword
def values(self):
"""Like :meth:`dict.values`."""
for card in self._cards:
yield None if card.value == UNDEFINED else card.value
def pop(self, *args):
"""
Works like :meth:`list.pop` if no arguments or an index argument are
supplied; otherwise works like :meth:`dict.pop`.
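For example, popping a keyword that is absent returns the default:
>>> from astropy.io.fits import Header
>>> Header({'A': 1}).pop('B', 42)
42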
"""
if len(args) > 2:
raise TypeError(f'Header.pop expected at most 2 arguments, got {len(args)}')
if len(args) == 0:
key = -1
else:
key = args[0]
try:
value = self[key]
except (KeyError, IndexError):
if len(args) == 2:
return args[1]
raise
del self[key]
return value
def popitem(self):
"""Similar to :meth:`dict.popitem`."""
try:
k, v = next(self.items())
except StopIteration:
raise KeyError('Header is empty')
del self[k]
return k, v
def setdefault(self, key, default=None):
"""Similar to :meth:`dict.setdefault`."""
try:
return self[key]
except (KeyError, IndexError):
self[key] = default
return default
def update(self, *args, **kwargs):
"""
Update the Header with new keyword values, updating the values of
existing keywords and appending new keywords otherwise; similar to
`dict.update`.
`update` accepts either a dict-like object or an iterable. In the
former case the keys must be header keywords and the values may be
either scalar values or (value, comment) tuples. In the case of an
iterable the items must be (keyword, value) tuples or (keyword, value,
comment) tuples.
Arbitrary arguments are also accepted, in which case the update() is
called again with the kwargs dict as its only argument. That is,
::
>>> header.update(NAXIS1=100, NAXIS2=100)
is equivalent to::
header.update({'NAXIS1': 100, 'NAXIS2': 100})
.. warning::
As this method works similarly to `dict.update` it is very
different from the ``Header.update()`` method in Astropy v0.1.
Use of the old API was
**deprecated** for a long time and is now removed. Most uses of the
old API can be replaced as follows:
* Replace ::
header.update(keyword, value)
with ::
header[keyword] = value
* Replace ::
header.update(keyword, value, comment=comment)
with ::
header[keyword] = (value, comment)
* Replace ::
header.update(keyword, value, before=before_keyword)
with ::
header.insert(before_keyword, (keyword, value))
* Replace ::
header.update(keyword, value, after=after_keyword)
with ::
header.insert(after_keyword, (keyword, value),
after=True)
See also :meth:`Header.set` which is a new method that provides an
interface similar to the old ``Header.update()`` and may help make
transition a little easier.
"""
if args:
other = args[0]
else:
other = None
def update_from_dict(k, v):
if not isinstance(v, tuple):
card = Card(k, v)
elif 0 < len(v) <= 2:
card = Card(*((k,) + v))
else:
raise ValueError(
'Header update value for key %r is invalid; the '
'value must be either a scalar, a 1-tuple '
'containing the scalar value, or a 2-tuple '
'containing the value and a comment string.' % k)
self._update(card)
if other is None:
pass
elif isinstance(other, Header):
for card in other.cards:
self._update(card)
elif hasattr(other, 'items'):
for k, v in other.items():
update_from_dict(k, v)
elif hasattr(other, 'keys'):
for k in other.keys():
update_from_dict(k, other[k])
else:
for idx, card in enumerate(other):
if isinstance(card, Card):
self._update(card)
elif isinstance(card, tuple) and (1 < len(card) <= 3):
self._update(Card(*card))
else:
raise ValueError(
'Header update sequence item #{} is invalid; '
'the item must either be a 2-tuple containing '
'a keyword and value, or a 3-tuple containing '
'a keyword, value, and comment string.'.format(idx))
if kwargs:
self.update(kwargs)
def append(self, card=None, useblanks=True, bottom=False, end=False):
"""
Appends a new keyword+value card to the end of the Header, similar
to `list.append`.
By default if the last cards in the Header have commentary keywords,
this will append the new keyword before the commentary (unless the new
keyword is also commentary).
Also differs from `list.append` in that it can be called with no
arguments: In this case a blank card is appended to the end of the
Header. In this case all the keyword arguments are ignored.
Parameters
----------
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple representing a
single header card; the comment is optional in which case a
2-tuple may be used
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
bottom : bool, optional
If True, instead of appending after the last non-commentary card,
append after the last non-blank card.
end : bool, optional
If True, ignore the useblanks and bottom options, and append at the
very end of the Header.
"""
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
'The value appended to a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if not end and card.is_blank:
# Blank cards should always just be appended to the end
end = True
if end:
self._cards.append(card)
idx = len(self._cards) - 1
else:
idx = len(self._cards) - 1
while idx >= 0 and self._cards[idx].is_blank:
idx -= 1
if not bottom and card.keyword not in Card._commentary_keywords:
while (idx >= 0 and
self._cards[idx].keyword in Card._commentary_keywords):
idx -= 1
idx += 1
self._cards.insert(idx, card)
self._updateindices(idx)
keyword = Card.normalize_keyword(card.keyword)
self._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
self._rvkc_indices[card.rawkeyword].append(idx)
if not end:
# If the appended card was a commentary card, and it was appended
# before existing cards with the same keyword, the indices for
# cards with that keyword may have changed
if not bottom and card.keyword in Card._commentary_keywords:
self._keyword_indices[keyword].sort()
# Finally, if useblanks, delete blank cards from the end
if useblanks and self._countblanks():
# Don't do this unless there is at least one blank at the end
# of the header; we need to convert the card to its string
# image to see how long it is. In the vast majority of cases
# this will just be 80 (Card.length) but it may be longer for
# CONTINUE cards
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def extend(self, cards, strip=True, unique=False, update=False,
update_first=False, useblanks=True, bottom=False, end=False):
"""
Appends multiple keyword+value cards to the end of the header, similar
to `list.extend`.
Parameters
----------
cards : iterable
An iterable of (keyword, value, [comment]) tuples; see
`Header.append`.
strip : bool, optional
Remove any keywords that have meaning only to specific types of
HDUs, so that only more general keywords are added from extension
Header or Card list (default: `True`).
unique : bool, optional
If `True`, ensures that no duplicate keywords are appended;
keywords already in this header are simply discarded. The
exception is commentary keywords (COMMENT, HISTORY, etc.): they are
only treated as duplicates if their values match.
update : bool, optional
If `True`, update the current header with the values and comments
from duplicate keywords in the input header. This supersedes the
``unique`` argument. Commentary keywords are treated the same as
if ``unique=True``.
update_first : bool, optional
If the first keyword in the header is 'SIMPLE', and the first
keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is
replaced by the 'XTENSION' keyword. Likewise if the first keyword
in the header is 'XTENSION' and the first keyword in the input
header is 'SIMPLE', the 'XTENSION' keyword is replaced by the
'SIMPLE' keyword. This behavior is otherwise dumb as to whether or
not the resulting header is a valid primary or extension header.
This is mostly provided to support backwards compatibility with the
old ``Header.fromTxtFile`` method, and only applies if
``update=True``.
useblanks, bottom, end : bool, optional
These arguments are passed to :meth:`Header.append` while appending
new cards to the header.
"""
temp = self.__class__(cards)
if strip:
temp.strip()
if len(self):
first = self._cards[0].keyword
else:
first = None
# We don't immediately modify the header, because first we need to sift
# out any duplicates in the new header prior to adding them to the
# existing header, but while *allowing* duplicates from the header
# being extended from (see ticket #156)
extend_cards = []
for idx, card in enumerate(temp.cards):
keyword = card.keyword
if keyword not in Card._commentary_keywords:
if unique and not update and keyword in self:
continue
elif update:
if idx == 0 and update_first:
# Dumbly update the first keyword to either SIMPLE or
# XTENSION as the case may be, as was in the case in
# Header.fromTxtFile
if ((keyword == 'SIMPLE' and first == 'XTENSION') or
(keyword == 'XTENSION' and first == 'SIMPLE')):
del self[0]
self.insert(0, card)
else:
self[keyword] = (card.value, card.comment)
elif keyword in self:
self[keyword] = (card.value, card.comment)
else:
extend_cards.append(card)
else:
extend_cards.append(card)
else:
if (unique or update) and keyword in self:
if card.is_blank:
extend_cards.append(card)
continue
for value in self[keyword]:
if value == card.value:
break
else:
extend_cards.append(card)
else:
extend_cards.append(card)
for card in extend_cards:
self.append(card, useblanks=useblanks, bottom=bottom, end=end)
def count(self, keyword):
"""
Returns the count of the given keyword in the header, similar to
`list.count` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword to count instances of in the header
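Examples
--------
For instance, counting duplicated commentary cards:
>>> from astropy.io.fits import Header
>>> hdr = Header([('HISTORY', 'one'), ('HISTORY', 'two')])
>>> hdr.count('HISTORY')
2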
"""
keyword = Card.normalize_keyword(keyword)
# We have to look before we leap, since otherwise _keyword_indices,
# being a defaultdict, will create an entry for the nonexistent keyword
if keyword not in self._keyword_indices:
raise KeyError(f"Keyword {keyword!r} not found.")
return len(self._keyword_indices[keyword])
def index(self, keyword, start=None, stop=None):
"""
Returns the index of the first instance of the given keyword in the
header, similar to `list.index` if the Header object is treated as a
list of keywords.
Parameters
----------
keyword : str
The keyword to look up in the list of all keywords in the header
start : int, optional
The lower bound for the index
stop : int, optional
The upper bound for the index
"""
if start is None:
start = 0
if stop is None:
stop = len(self._cards)
if stop < start:
step = -1
else:
step = 1
norm_keyword = Card.normalize_keyword(keyword)
for idx in range(start, stop, step):
if self._cards[idx].keyword.upper() == norm_keyword:
return idx
else:
raise ValueError(f'The keyword {keyword!r} is not in the header.')
def insert(self, key, card, useblanks=True, after=False):
"""
Inserts a new keyword+value card into the Header at a given location,
similar to `list.insert`.
Parameters
----------
key : int, str, or tuple
The index into the list of header keywords before which the
new keyword should be inserted, or the name of a keyword before
which the new keyword should be inserted. Can also accept a
(keyword, index) tuple for inserting around duplicate keywords.
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple; see
`Header.append`
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
after : bool, optional
If set to `True`, insert *after* the specified index or keyword,
rather than before it. Defaults to `False`.
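Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'NAXIS': 2})
>>> hdr.insert('NAXIS', ('SIMPLE', True))
>>> list(hdr)
['SIMPLE', 'NAXIS']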
"""
if not isinstance(key, numbers.Integral):
# Don't pass through ints to _cardindex because it will not take
# kindly to indices outside the existing number of cards in the
# header, which insert needs to be able to support (for example
# when inserting into empty headers)
idx = self._cardindex(key)
else:
idx = key
if after:
if idx == -1:
idx = len(self._cards)
else:
idx += 1
if idx >= len(self._cards):
# This is just an append (Though it must be an append absolutely to
# the bottom, ignoring blanks, etc.--the point of the insert method
# is that you get exactly what you asked for with no surprises)
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
'The value inserted into a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
self._cards.insert(idx, card)
keyword = card.keyword
# If idx was < 0, determine the actual index according to the rules
# used by list.insert()
if idx < 0:
idx += len(self._cards) - 1
if idx < 0:
idx = 0
# All the keyword indices above the insertion point must be updated
self._updateindices(idx)
keyword = Card.normalize_keyword(keyword)
self._keyword_indices[keyword].append(idx)
count = len(self._keyword_indices[keyword])
if count > 1:
# There were already keywords with this same name
if keyword not in Card._commentary_keywords:
warnings.warn(
'A {!r} keyword already exists in this header. Inserting '
'duplicate keyword.'.format(keyword), AstropyUserWarning)
self._keyword_indices[keyword].sort()
if card.field_specifier is not None:
# Update the index of RVKC as well
rvkc_indices = self._rvkc_indices[card.rawkeyword]
rvkc_indices.append(idx)
rvkc_indices.sort()
if useblanks:
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def remove(self, keyword, ignore_missing=False, remove_all=False):
"""
Removes the first instance of the given keyword from the header similar
to `list.remove` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword of which to remove the first instance in the header.
ignore_missing : bool, optional
When True, ignores missing keywords. Otherwise, if the keyword
is not present in the header a KeyError is raised.
remove_all : bool, optional
When True, all instances of keyword will be removed.
Otherwise only the first instance of the given keyword is removed.
"""
keyword = Card.normalize_keyword(keyword)
if keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
if remove_all:
while keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
elif not ignore_missing:
raise KeyError(f"Keyword '{keyword}' not found.")
def rename_keyword(self, oldkeyword, newkeyword, force=False):
"""
Rename a card's keyword in the header.
Parameters
----------
oldkeyword : str or int
Old keyword or card index
newkeyword : str
New keyword
force : bool, optional
When `True`, if the new keyword already exists in the header, force
the creation of a duplicate keyword. Otherwise a
`ValueError` is raised.
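Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'OLDKEY': 1})
>>> hdr.rename_keyword('OLDKEY', 'NEWKEY')
>>> 'NEWKEY' in hdr, 'OLDKEY' in hdr
(True, False)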
"""
oldkeyword = Card.normalize_keyword(oldkeyword)
newkeyword = Card.normalize_keyword(newkeyword)
if newkeyword == 'CONTINUE':
raise ValueError('Can not rename to CONTINUE')
if (newkeyword in Card._commentary_keywords or
oldkeyword in Card._commentary_keywords):
if not (newkeyword in Card._commentary_keywords and
oldkeyword in Card._commentary_keywords):
raise ValueError('Regular and commentary keys can not be '
'renamed to each other.')
elif not force and newkeyword in self:
raise ValueError(f'Intended keyword {newkeyword} already exists in header.')
idx = self.index(oldkeyword)
card = self._cards[idx]
del self[idx]
self.insert(idx, (newkeyword, card.value, card.comment))
def add_history(self, value, before=None, after=None):
"""
Add a ``HISTORY`` card.
Parameters
----------
value : str
History text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('HISTORY', value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""
Add a ``COMMENT`` card.
Parameters
----------
value : str
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('COMMENT', value, before=before, after=after)
def add_blank(self, value='', before=None, after=None):
"""
Add a blank card.
Parameters
----------
value : str, optional
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('', value, before=before, after=after)
def strip(self):
"""
Strip cards specific to a certain kind of header.
Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of
the header can be used to reconstruct another kind of header.
"""
# TODO: Previously this only deleted some cards specific to an HDU if
# _hdutype matched that type. But it seemed simple enough to just
# delete all desired cards anyways, and just ignore the KeyErrors if
# they don't exist.
# However, it might be desirable to make this extendable somehow--have
# a way for HDU classes to specify some headers that are specific only
# to that type, and should be removed otherwise.
naxis = self.get('NAXIS', 0)
tfields = self.get('TFIELDS', 0)
for idx in range(naxis):
self.remove('NAXIS' + str(idx + 1), ignore_missing=True)
for name in ('TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE',
'TUNIT', 'TDISP', 'TDIM', 'THEAP', 'TBCOL'):
for idx in range(tfields):
self.remove(name + str(idx + 1), ignore_missing=True)
for name in ('SIMPLE', 'XTENSION', 'BITPIX', 'NAXIS', 'EXTEND',
'PCOUNT', 'GCOUNT', 'GROUPS', 'BSCALE', 'BZERO',
'TFIELDS'):
self.remove(name, ignore_missing=True)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
def _update(self, card):
"""
The real update code. If keyword already exists, its value and/or
comment will be updated. Otherwise a new card will be appended.
This will not create a duplicate keyword except in the case of
commentary cards. The only other way to force creation of a duplicate
is to use the insert(), append(), or extend() methods.
"""
keyword, value, comment = card
# Lookups for existing/known keywords are case-insensitive
keyword = keyword.strip().upper()
if keyword.startswith('HIERARCH '):
keyword = keyword[9:]
if (keyword not in Card._commentary_keywords and
keyword in self._keyword_indices):
# Easy; just update the value/comment
idx = self._keyword_indices[keyword][0]
existing_card = self._cards[idx]
existing_card.value = value
if comment is not None:
# '' should be used to explicitly blank a comment
existing_card.comment = comment
if existing_card._modified:
self._modified = True
elif keyword in Card._commentary_keywords:
cards = self._splitcommentary(keyword, value)
if keyword in self._keyword_indices:
# Append after the last keyword of the same type
idx = self.index(keyword, start=len(self) - 1, stop=-1)
isblank = not (keyword or value or comment)
for c in reversed(cards):
self.insert(idx + 1, c, useblanks=(not isblank))
else:
for c in cards:
self.append(c, bottom=True)
else:
# A new keyword! self.append() will handle updating _modified
self.append(card)
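    # Behavior sketch for _update (comments only; ``hdr`` is a hypothetical
    # Header and cards are (keyword, value, comment) 3-tuples):
    #
    #     hdr._update(('BITPIX', 16, 'bits per pixel'))  # existing card updated
    #     hdr._update(('HISTORY', 'step 1', None))       # commentary card appended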
def _cardindex(self, key):
"""Returns an index into the ._cards list given a valid lookup key."""
# This used to just set key = (key, 0) and then go on to act as if the
# user passed in a tuple, but it's much more common to just be given a
# string as the key, so optimize more for that case
if isinstance(key, str):
keyword = key
n = 0
elif isinstance(key, numbers.Integral):
# If < 0, determine the actual index
if key < 0:
key += len(self._cards)
if key < 0 or key >= len(self._cards):
raise IndexError('Header index out of range.')
return key
elif isinstance(key, slice):
return key
elif isinstance(key, tuple):
if (len(key) != 2 or not isinstance(key[0], str) or
not isinstance(key[1], numbers.Integral)):
raise ValueError(
'Tuple indices must be 2-tuples consisting of a '
'keyword string and an integer index.')
keyword, n = key
else:
raise ValueError(
'Header indices must be either a string, a 2-tuple, or '
'an integer.')
keyword = Card.normalize_keyword(keyword)
# Returns the index into _cards for the n-th card with the given
# keyword (where n is 0-based)
indices = self._keyword_indices.get(keyword, None)
if keyword and not indices:
if len(keyword) > KEYWORD_LENGTH or '.' in keyword:
raise KeyError(f"Keyword {keyword!r} not found.")
else:
# Maybe it's a RVKC?
indices = self._rvkc_indices.get(keyword, None)
if not indices:
raise KeyError(f"Keyword {keyword!r} not found.")
try:
return indices[n]
except IndexError:
raise IndexError('There are only {} {!r} cards in the '
'header.'.format(len(indices), keyword))
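    # Sketch of the lookup keys accepted above (comments only; ``hdr`` is a
    # hypothetical Header):
    #
    #     hdr._cardindex('NAXIS')         # first card with keyword NAXIS
    #     hdr._cardindex(('COMMENT', 1))  # second COMMENT card
    #     hdr._cardindex(-1)              # last card (negative index resolved)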
def _keyword_from_index(self, idx):
"""
Given an integer index, return the (keyword, repeat) tuple that index
refers to. For most keywords the repeat will always be zero, but it
may be greater than zero for keywords that are duplicated (especially
commentary keywords).
In a sense this is the inverse of self.index, except that it also
supports duplicates.
"""
if idx < 0:
idx += len(self._cards)
keyword = self._cards[idx].keyword
keyword = Card.normalize_keyword(keyword)
repeat = self._keyword_indices[keyword].index(idx)
return keyword, repeat
def _relativeinsert(self, card, before=None, after=None, replace=False):
"""
Inserts a new card before or after an existing card; used to
implement support for the legacy before/after keyword arguments to
Header.update().
If replace=True, move an existing card with the same keyword.
"""
if before is None:
insertionkey = after
else:
insertionkey = before
def get_insertion_idx():
if not (isinstance(insertionkey, numbers.Integral) and
insertionkey >= len(self._cards)):
idx = self._cardindex(insertionkey)
else:
idx = insertionkey
if before is None:
idx += 1
return idx
if replace:
# The card presumably already exists somewhere in the header.
# Check whether or not we actually have to move it; if it does need
# to be moved we just delete it and then it will be reinserted
# below
old_idx = self._cardindex(card.keyword)
insertion_idx = get_insertion_idx()
if (insertion_idx >= len(self._cards) and
old_idx == len(self._cards) - 1):
# The card would be appended to the end, but it's already at
# the end
return
if before is not None:
if old_idx == insertion_idx - 1:
return
elif after is not None and old_idx == insertion_idx:
return
del self[old_idx]
# Even if replace=True, the insertion idx may have changed since the
# old card was deleted
idx = get_insertion_idx()
if card[0] in Card._commentary_keywords:
cards = reversed(self._splitcommentary(card[0], card[1]))
else:
cards = [card]
for c in cards:
self.insert(idx, c)
def _updateindices(self, idx, increment=True):
"""
        For all cards with index above idx, increment or decrement their
        index values in the keyword indices dicts.
"""
if idx > len(self._cards):
# Save us some effort
return
increment = 1 if increment else -1
for index_sets in (self._keyword_indices, self._rvkc_indices):
for indices in index_sets.values():
for jdx, keyword_index in enumerate(indices):
if keyword_index >= idx:
indices[jdx] += increment
def _countblanks(self):
"""Returns the number of blank cards at the end of the Header."""
for idx in range(1, len(self._cards)):
if not self._cards[-idx].is_blank:
return idx - 1
return 0
def _useblanks(self, count):
for _ in range(count):
if self._cards[-1].is_blank:
del self[-1]
else:
break
def _haswildcard(self, keyword):
"""Return `True` if the input keyword contains a wildcard pattern."""
return (isinstance(keyword, str) and
(keyword.endswith('...') or '*' in keyword or '?' in keyword))
def _wildcardmatch(self, pattern):
"""
Returns a list of indices of the cards matching the given wildcard
pattern.
* '*' matches 0 or more characters
* '?' matches a single character
* '...' matches 0 or more of any non-whitespace character
"""
pattern = pattern.replace('*', r'.*').replace('?', r'.')
pattern = pattern.replace('...', r'\S*') + '$'
pattern_re = re.compile(pattern, re.I)
return [idx for idx, card in enumerate(self._cards)
if pattern_re.match(card.keyword)]
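    # Illustrative sketch of the wildcard rules above (comments only; ``hdr``
    # is a hypothetical Header containing NAXIS, NAXIS1 and NAXIS2 cards):
    #
    #     hdr['NAXIS*']   # matches NAXIS, NAXIS1, NAXIS2 ('*' = 0 or more chars)
    #     hdr['NAXIS?']   # matches NAXIS1 and NAXIS2 only ('?' = one char)
    #     hdr['NAX...']   # matches any keyword starting with 'NAX'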
def _set_slice(self, key, value, target):
"""
Used to implement Header.__setitem__ and CardAccessor.__setitem__.
"""
if isinstance(key, slice) or self._haswildcard(key):
if isinstance(key, slice):
indices = range(*key.indices(len(target)))
else:
indices = self._wildcardmatch(key)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
target[idx] = val
return True
return False
def _splitcommentary(self, keyword, value):
"""
Given a commentary keyword and value, returns a list of the one or more
cards needed to represent the full value. This is primarily used to
create the multiple commentary cards needed to represent a long value
that won't fit into a single commentary card.
"""
        # The maximum value length in each card is the maximum card length
        # minus the keyword field length (the keyword field is padded with
        # spaces if the keyword is shorter than 8 characters)
maxlen = Card.length - KEYWORD_LENGTH
valuestr = str(value)
if len(valuestr) <= maxlen:
# The value can fit in a single card
cards = [Card(keyword, value)]
else:
# The value must be split across multiple consecutive commentary
# cards
idx = 0
cards = []
while idx < len(valuestr):
cards.append(Card(keyword, valuestr[idx:idx + maxlen]))
idx += maxlen
return cards
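    # Sketch of the splitting rule above, assuming the standard 80-character
    # card length and 8-character keyword field (72 characters of value per
    # card):
    #
    #     cards = hdr._splitcommentary('HISTORY', 'x' * 100)
    #     # -> two HISTORY cards: 72 characters on the first, 28 on the second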
def _add_commentary(self, key, value, before=None, after=None):
"""
Add a commentary card.
If ``before`` and ``after`` are `None`, add to the last occurrence
of cards of the same name (except blank card). If there is no
card (or blank card), append at the end.
"""
if before is not None or after is not None:
self._relativeinsert((key, value), before=before,
after=after)
else:
self[key] = value
collections.abc.MutableSequence.register(Header)
collections.abc.MutableMapping.register(Header)
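# Hedged usage sketch for the commentary helpers above (comments only, so
# nothing executes at import time; ``hdr`` is a hypothetical Header):
#
#     hdr = Header()
#     hdr.add_history('flat-field correction applied')
#     hdr.add_comment('reduced with pipeline v2')
#     hdr.add_blank()
#
# HISTORY and COMMENT are commentary keywords, so repeated calls append
# duplicate cards instead of updating an existing one.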
class _DelayedHeader:
"""
Descriptor used to create the Header object from the header string that
was stored in HDU._header_str when parsing the file.
"""
def __get__(self, obj, owner=None):
try:
return obj.__dict__['_header']
except KeyError:
if obj._header_str is not None:
hdr = Header.fromstring(obj._header_str)
obj._header_str = None
else:
raise AttributeError("'{}' object has no attribute '_header'"
.format(obj.__class__.__name__))
obj.__dict__['_header'] = hdr
return hdr
def __set__(self, obj, val):
obj.__dict__['_header'] = val
def __delete__(self, obj):
del obj.__dict__['_header']
class _BasicHeaderCards:
"""
    This class provides access to cards through the _BasicHeader.cards
    attribute. It is needed because, during HDU class detection, some HDUs
    use the .cards interface. Cards cannot be modified here, as the
    _BasicHeader object is deleted once the HDU object is created.
"""
def __init__(self, header):
self.header = header
def __getitem__(self, key):
# .cards is a list of cards, so key here is an integer.
# get the keyword name from its index.
key = self.header._keys[key]
# then we get the card from the _BasicHeader._cards list, or parse it
# if needed.
try:
return self.header._cards[key]
except KeyError:
cardstr = self.header._raw_cards[key]
card = Card.fromstring(cardstr)
self.header._cards[key] = card
return card
class _BasicHeader(collections.abc.Mapping):
"""This class provides a fast header parsing, without all the additional
features of the Header class. Here only standard keywords are parsed, no
support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc.
The raw card images are stored and parsed only if needed. The idea is that
to create the HDU objects, only a small subset of standard cards is needed.
Once a card is parsed, which is deferred to the Card class, the Card object
is kept in a cache. This is useful because a small subset of cards is used
a lot in the HDU creation process (NAXIS, XTENSION, ...).
"""
def __init__(self, cards):
# dict of (keywords, card images)
self._raw_cards = cards
self._keys = list(cards.keys())
# dict of (keyword, Card object) storing the parsed cards
self._cards = {}
        # the _BasicHeaderCards object allows access to Card objects by
        # index
self.cards = _BasicHeaderCards(self)
self._modified = False
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
key = self._keys[key]
try:
return self._cards[key].value
except KeyError:
# parse the Card and store it
cardstr = self._raw_cards[key]
self._cards[key] = card = Card.fromstring(cardstr)
return card.value
def __len__(self):
return len(self._raw_cards)
def __iter__(self):
return iter(self._raw_cards)
def index(self, keyword):
return self._keys.index(keyword)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
@classmethod
def fromfile(cls, fileobj):
"""The main method to parse a FITS header from a file. The parsing is
done with the parse_header function implemented in Cython."""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'rb')
close_file = True
try:
header_str, cards = parse_header(fileobj)
_check_padding(header_str, BLOCK_SIZE, False)
return header_str, cls(cards)
finally:
if close_file:
fileobj.close()
class _CardAccessor:
"""
This is a generic class for wrapping a Header in such a way that you can
use the header's slice/filtering capabilities to return a subset of cards
and do something with them.
This is sort of the opposite notion of the old CardList class--whereas
Header used to use CardList to get lists of cards, this uses Header to get
lists of cards.
"""
# TODO: Consider giving this dict/list methods like Header itself
def __init__(self, header):
self._header = header
def __repr__(self):
return '\n'.join(repr(c) for c in self._header._cards)
def __len__(self):
return len(self._header._cards)
def __iter__(self):
return iter(self._header._cards)
def __eq__(self, other):
# If the `other` item is a scalar we will still treat it as equal if
# this _CardAccessor only contains one item
if not isiterable(other) or isinstance(other, str):
if len(self) == 1:
other = [other]
else:
return False
for a, b in itertools.zip_longest(self, other):
if a != b:
return False
else:
return True
def __ne__(self, other):
return not (self == other)
def __getitem__(self, item):
if isinstance(item, slice) or self._header._haswildcard(item):
return self.__class__(self._header[item])
idx = self._header._cardindex(item)
return self._header._cards[idx]
def _setslice(self, item, value):
"""
Helper for implementing __setitem__ on _CardAccessor subclasses; slices
should always be handled in this same way.
"""
if isinstance(item, slice) or self._header._haswildcard(item):
if isinstance(item, slice):
indices = range(*item.indices(len(self)))
else:
indices = self._header._wildcardmatch(item)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
self[idx] = val
return True
return False
class _HeaderComments(_CardAccessor):
"""
A class used internally by the Header class for the Header.comments
attribute access.
This object can be used to display all the keyword comments in the Header,
or look up the comments on specific keywords. It allows all the same forms
of keyword lookup as the Header class itself, but returns comments instead
of values.
"""
def __iter__(self):
for card in self._header._cards:
yield card.comment
def __repr__(self):
"""Returns a simple list of all keywords and their comments."""
keyword_length = KEYWORD_LENGTH
for card in self._header._cards:
keyword_length = max(keyword_length, len(card.keyword))
return '\n'.join('{:>{len}} {}'.format(c.keyword, c.comment,
len=keyword_length)
for c in self._header._cards)
def __getitem__(self, item):
"""
Slices and filter strings return a new _HeaderComments containing the
returned cards. Otherwise the comment of a single card is returned.
"""
item = super().__getitem__(item)
if isinstance(item, _HeaderComments):
# The item key was a slice
return item
return item.comment
def __setitem__(self, item, comment):
"""
Set/update the comment on specified card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, comment, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
idx = self._header._cardindex(item)
value = self._header[idx]
self._header[idx] = (value, comment)
class _HeaderCommentaryCards(_CardAccessor):
"""
This is used to return a list-like sequence over all the values in the
header for a given commentary keyword, such as HISTORY.
"""
def __init__(self, header, keyword=''):
super().__init__(header)
self._keyword = keyword
self._count = self._header.count(self._keyword)
self._indices = slice(self._count).indices(self._count)
# __len__ and __iter__ need to be overridden from the base class due to the
# different approach this class has to take for slicing
def __len__(self):
return len(range(*self._indices))
def __iter__(self):
for idx in range(*self._indices):
yield self._header[(self._keyword, idx)]
def __repr__(self):
return '\n'.join(str(x) for x in self)
def __getitem__(self, idx):
if isinstance(idx, slice):
n = self.__class__(self._header, self._keyword)
n._indices = idx.indices(self._count)
return n
elif not isinstance(idx, numbers.Integral):
raise ValueError(f'{self._keyword} index must be an integer')
idx = list(range(*self._indices))[idx]
return self._header[(self._keyword, idx)]
def __setitem__(self, item, value):
"""
Set the value of a specified commentary card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, value, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
self._header[(self._keyword, item)] = value
def _block_size(sep):
"""
Determine the size of a FITS header block if a non-blank separator is used
between cards.
"""
return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1))
def _pad_length(stringlen):
"""Bytes needed to pad the input stringlen to the next FITS block."""
return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE
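# Quick arithmetic sketch for the padding helper above, assuming the standard
# FITS BLOCK_SIZE of 2880 bytes:
#
#     _pad_length(2880)  # -> 0     (already block-aligned)
#     _pad_length(100)   # -> 2780  (pad up to the next 2880-byte block)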
def _check_padding(header_str, block_size, is_eof, check_block_size=True):
# Strip any zero-padding (see ticket #106)
if header_str and header_str[-1] == '\0':
if is_eof and header_str.strip('\0') == '':
# TODO: Pass this warning to validation framework
warnings.warn(
'Unexpected extra padding at the end of the file. This '
'padding may not be preserved when saving changes.',
AstropyUserWarning)
raise EOFError()
else:
# Replace the illegal null bytes with spaces as required by
# the FITS standard, and issue a nasty warning
# TODO: Pass this warning to validation framework
warnings.warn(
'Header block contains null bytes instead of spaces for '
'padding, and is not FITS-compliant. Nulls may be '
'replaced with spaces upon writing.', AstropyUserWarning)
            header_str = header_str.replace('\0', ' ')
if check_block_size and (len(header_str) % block_size) != 0:
# This error message ignores the length of the separator for
# now, but maybe it shouldn't?
actual_len = len(header_str) - block_size + BLOCK_SIZE
# TODO: Pass this error to validation framework
raise ValueError(f'Header size is not multiple of {BLOCK_SIZE}: {actual_len}')
def _hdr_data_size(header):
"""Calculate the data size (in bytes) following the given `Header`"""
size = 0
naxis = header.get('NAXIS', 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * header['NAXIS' + str(idx + 1)]
bitpix = header['BITPIX']
gcount = header.get('GCOUNT', 1)
pcount = header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
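# Worked example for _hdr_data_size (hypothetical keyword values): with
# NAXIS=2, NAXIS1=100, NAXIS2=200, BITPIX=-32, GCOUNT=1 and PCOUNT=0 the
# result is abs(-32) * 1 * (0 + 100 * 200) // 8 = 80000 bytes.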
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import io
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
import numpy as np
from packaging.version import Version
from astropy.utils import data
from astropy.utils.exceptions import AstropyUserWarning
path_like = (str, os.PathLike)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = f'_update_{notification}'
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving the listener's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state['_listeners'] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter('__name__')):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.current_thread()
single_thread = (threading.active_count() == 1 and
curr_thread.name == 'MainThread')
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn('KeyboardInterrupt ignored until {} is '
'complete!'.format(func.__name__),
AstropyUserWarning)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
        # StopIteration if b happens to be empty
break
return zip(a, b)
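# Small sketch of pairwise (comments only):
#
#     list(pairwise([1, 2, 3, 4]))  # -> [(1, 2), (2, 3), (3, 4)]
#     list(pairwise([]))            # -> [] (the bare 'break' copes with empty input)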
def encode_ascii(s):
if isinstance(s, str):
return s.encode('ascii')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.str_)):
ns = np.char.encode(s, 'ascii').view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
            ns = ns.astype((np.bytes_, s.dtype.itemsize // 4))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.bytes_)):
raise TypeError('string operation on non-string array')
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode('ascii')
except UnicodeDecodeError:
warnings.warn('non-ASCII characters are present in the FITS '
'file header and have been replaced by "?" '
'characters', AstropyUserWarning)
s = s.decode('ascii', errors='replace')
return s.replace('\ufffd', '?')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.bytes_)):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
            # Numpy apparently also has a bug that if a string array is
            # empty, calling np.char.decode on it returns an empty float64
            # array, so construct the empty unicode array directly
dt = s.dtype.str.replace('S', 'U')
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, 'ascii').view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.str_)):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError('string operation on non-string array')
return s
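# Round-trip sketch for encode_ascii/decode_ascii (comments only):
#
#     encode_ascii('SIMPLE')    # -> b'SIMPLE'
#     decode_ascii(b'SIMPLE')   # -> 'SIMPLE'
#     decode_ascii(b'caf\xe9')  # warns, -> 'caf?' (non-ASCII replaced)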
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, 'readable'):
return f.readable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'read'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'r+'):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, 'writable'):
return f.writable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'write'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'wa+'):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, 'buffer'):
return isfile(f.buffer)
elif hasattr(f, 'raw'):
return isfile(f.raw)
return False
def fileobj_name(f):
"""
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, (str, bytes)):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, 'name'):
return f.name
elif hasattr(f, 'filename'):
return f.filename
elif hasattr(f, '__class__'):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, path_like):
return True
if hasattr(f, 'closed'):
return f.closed
elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'):
return f.fileobj.closed
elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, 'fileobj_mode'):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, 'mode'):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return 'rb'
elif mode == gzip.WRITE:
return 'wb'
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if '+' in mode:
mode = mode.replace('+', '')
mode += '+'
return mode
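# Sketch of the normalization above (comments only; ``Fake`` is a
# hypothetical stand-in exposing a raw ``mode`` attribute):
#
#     class Fake:
#         mode = 'r+b'
#     _fileobj_normalize_mode(Fake())  # -> 'rb+' ('+' moved to the end)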
def fileobj_is_binary(f):
"""
    Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, 'binary'):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return 'b' in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split('\n\n')
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return '\n\n'.join(maybe_fill(p) for p in paragraphs)
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if (sys.platform == 'darwin' and
Version(platform.mac_ver()[0]) < Version('10.9')):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
        # a copy is needed because an array constructed over the read-only
        # bytes buffer is itself read-only
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2 ** 32) - 1
_WIN_WRITE_LIMIT = (2 ** 31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
try:
seekable = outfile.seekable()
except AttributeError:
seekable = False
if isfile(outfile) and seekable:
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and
arr.nbytes % 4096 == 0):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith('win'):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
    while idx < len(arr):
write(arr[idx:idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
        # it does not have the buffer interface, a TypeError should be raised,
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, 'nditer'):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order='C'):
fileobj.write(item.tobytes())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if ((sys.byteorder == 'little' and byteorder == '>')
or (sys.byteorder == 'big' and byteorder == '<')):
for item in arr.flat:
fileobj.write(item.byteswap().tobytes())
else:
for item in arr.flat:
fileobj.write(item.tobytes())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
    elif not binmode and not isinstance(s, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and both types are not numeric, a view is
returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif (array.dtype.itemsize == dtype.itemsize and not
(np.issubdtype(array.dtype, np.number) and
np.issubdtype(dtype, np.number))):
# Includes a special case when both dtypes are at least numeric to
# account for old Trac ticket 218 (now inaccessible).
return array.view(dtype)
else:
return array.astype(dtype)
def _pseudo_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
# special case for int8
if dtype.kind == 'i' and dtype.itemsize == 1:
return -128
assert dtype.kind == 'u'
return 1 << (dtype.itemsize * 8 - 1)
def _is_pseudo_integer(dtype):
return (
(dtype.kind == 'u' and dtype.itemsize >= 2)
or (dtype.kind == 'i' and dtype.itemsize == 1)
)
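# Worked values for the pseudo-integer helpers above (comments only):
#
#     _pseudo_zero(np.dtype('int8'))          # -> -128
#     _pseudo_zero(np.dtype('uint16'))        # -> 32768 (1 << 15)
#     _is_pseudo_integer(np.dtype('uint16'))  # -> True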
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(s, width):
"""
    Split a long string into parts where each part is no longer than ``width``
    and no word is cut into two pieces. But if there are any single words
    which are longer than ``width``, then they will be split in the middle of
the word.
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode('utf8') + b' ', dtype='S1')
# locations of the blanks
blank_loc = np.nonzero(arr == b' ')[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
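# Sketch of _words_group (comments only). Note that a piece may keep its
# trailing blank, since the split happens just after a space:
#
#     _words_group('The quick brown fox', 10)
#     # -> ['The quick ', 'brown fox']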
def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
directory of the input file as the base name of the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
If the array has an mmap.mmap at base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, 'base') and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ''
if not isinstance(hdulist, list):
hdulist = [hdulist, ]
if dirname is None:
dirname = os.path.dirname(hdulist._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = ("Not enough space on disk: requested {}, "
"available {}. ".format(hdulist_size, free_space))
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
# The _str_to_num method converts the value to string/float
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
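# Quick examples for _extract_number (comments only):
#
#     _extract_number('3', 1)    # -> 3
#     _extract_number('3.7', 1)  # -> 3 (parsed as float, truncated by int())
#     _extract_number('abc', 1)  # -> 1 (falls back to the default)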
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(
f'io/fits/tests/data/{filename}', 'astropy')
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
    # The following implementation converts the string to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in 'SU':
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == 'S' else 4
dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}"
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j:j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
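# Sketch of _rstrip_inplace (comments only; the array is modified in place
# and also returned):
#
#     a = np.array(['a ', 'bb '])  # dtype '<U3'
#     _rstrip_inplace(a)           # a is now array(['a', 'bb'], dtype='<U3')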
def _is_dask_array(data):
"""Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down.
"""
if not hasattr(data, 'compute'):
return False
try:
from dask.array import Array
except ImportError:
# If we cannot import dask, surely this cannot be a
# dask array!
return False
else:
return isinstance(data, Array)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import copy
import csv
import functools
import itertools
import operator
import os
import re
import warnings
import inspect
import fnmatch
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
def _check_multidim_table(table, max_ndim):
"""Check that ``table`` has only columns with ndim <= ``max_ndim``
Currently ECSV is the only built-in format that supports output of arbitrary
N-d columns, but HTML supports 2-d.
"""
# No limit?
if max_ndim is None:
return
# Check for N-d columns
nd_names = [col.info.name for col in table.itercols() if len(col.shape) > max_ndim]
if nd_names:
raise ValueError(f'column(s) with dimension > {max_ndim} '
"cannot be be written with this format, try using 'ecsv' "
"(Enhanced CSV) format")
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = '2b=48Av%0-V3p>bX'
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ')
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
        # If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
'''All instances of this class shall have the same hash.'''
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **fill_values** : dict of fill values
* **shape** : list of element shape (default [] => scalar)
* **data** : list of converted column values
* **subtype** : actual datatype for columns serialized with JSON
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
self.shape = []
self.subtype = None
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table, newline=None):
"""
Get the lines from the ``table`` input. The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file-like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a
``read()`` method, or a list of strings.
newline :
Line separator. If `None` use OS default from ``splitlines()``.
Returns
-------
lines : list
List of lines
"""
try:
if (hasattr(table, 'read')
or ('\n' not in table + '' and '\r' not in table + '')):
with get_readable_fileobj(table,
encoding=self.encoding) as fileobj:
table = fileobj.read()
if newline is None:
lines = table.splitlines()
else:
lines = table.split(newline)
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
if len(table) > 1:
lines = table
else:
# treat single entry as if string had been passed directly
if newline is None:
lines = table[0].splitlines()
else:
lines = table[0].split(newline)
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable')
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines."""
return lines
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end."""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = ' '
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
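# Minimal sketch of BaseSplitter (comments only): with the default delimiter
# of None it splits on runs of whitespace, like str.split():
#
#     splitter = BaseSplitter()
#     list(splitter(['a  b', 'c d']))  # -> [['a', 'b'], ['c', 'd']]
#     splitter.join(['x', 'y'])        # -> 'x y'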
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = ' '
""" one-character string used to separate fields. """
    quotechar = '"'
    """ one-character string used to quote fields containing special characters """
    doublequote = True
    """ control how instances of *quotechar* in a field are quoted; when True they are doubled """
    escapechar = None
    """ one-character string used to remove special meaning from the following character """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first"""
if self.delimiter == r'\s':
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip() + '\n'
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip(' \t')
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Yields
------
line : list of str
Each line's split values.
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = ' ' if self.delimiter == r'\s' else self.delimiter
csv_reader = csv.reader(lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = ' ' if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting)
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals).rstrip('\r\n')
return out
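# Sketch of DefaultSplitter on quoted input (comments only):
#
#     splitter = DefaultSplitter()
#     list(splitter(['a "b c"']))  # -> [['a', 'b c']] (quotes group fields)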
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = 'NONE'
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
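# Example of the tab handling above (comments only): tabs outside quotes
# become spaces, tabs inside quoted substrings are preserved:
#
#     _replace_tab_with_space('a\t"b\tc"', None, '"')  # -> 'a "b\tc"'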
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if hasattr(line_or_func, '__call__'):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
class BaseHeader:
"""
Base table header reader
"""
auto_format = 'col{}'
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [re.sub('^' + self.comment, '', x).strip()
for x in comment_lines]
if comment_lines:
meta.setdefault('table', {})['comments'] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError('No data lines found so cannot autogenerate '
'column names')
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i)
for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError('No header line found in table')
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines"""
re_comment = re.compile(self.comment) if self.comment else None
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment not in (False, None):
for comment in meta.get('comments', []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(range(self.start_line),
itertools.cycle(self.write_spacer_lines)):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table"""
return tuple(col.name if isinstance(col, Column) else col.info.name
for col in self.cols)
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name, new_name):
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f'got column type {type(col)} instead of required '
f'{Column}')
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError('Unknown data type ""{}"" for column "{}"'.format(
col.raw_type, col.name))
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (_is_number(name) or len(name) == 0
or name[0] in bads or name[-1] in bads):
raise InconsistentTableError(
f'Column name {name!r} does not meet strict name requirements')
# When guessing require at least two columns, except for ECSV which can
# reliably be guessed from the header requirements.
if guessing and len(self.colnames) <= 1 and self.__class__.__name__ != 'EcsvHeader':
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(list(self.colnames)))
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
'Length of names argument ({}) does not match number'
' of table columns ({})'.format(len(names), len(self.colnames)))
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, '')]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
READ: Strip out comment lines and blank lines from list of ``lines``
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""READ: Set ``data_lines`` attribute to lines slice comprising table data values.
"""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line."""
return self.splitter(self.data_lines)
def masks(self, cols):
"""READ: Set fill value for each column and then apply that fill value
        In the first step it is determined which columns each value from ``fill_values``
        applies to, using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""READ, WRITE: Set fill values of individual cols based on fill_values of BaseData
        ``fill_values`` has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
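        For example (illustrative)::
            fill_values = [('', '0'), ('N/A', '-999', 'col_b')]
            # '' -> '0' in all columns; 'N/A' -> '-999' only in col_b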
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, 'fill_values'):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ''
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError("Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)")
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in ((i, x) for i, x in enumerate(self.header.colnames)
if x in affect_cols):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""READ: Replace string values in col.str_vals and set masks"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=bool)
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""WRITE: replace string values in col.str_vals"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, 'mask'):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings
This sets the fill values and possibly column formats from the input
formats={} keyword, then ends up calling table.pprint._pformat_col_iter()
by a circuitous path. That function does the real work of formatting.
Finally replace anything matching the fill_values.
Returns
-------
values : list of list of str
"""
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
"""Write ``self.cols`` in place to ``lines``.
Parameters
----------
lines : list
List for collecting output of writing self.cols.
"""
if hasattr(self.start_line, '__call__'):
raise TypeError('Start_line attribute cannot be callable for write()')
else:
data_start_line = self.start_line or 0
        # Pad up to the data start position with spacer lines
        spacer_lines = itertools.cycle(self.write_spacer_lines)
        while len(lines) < data_start_line:
            lines.append(next(spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""WRITE: set column formats."""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_
(e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python
type covered by a numpy type (e.g., int, float, str, bool).
Returns
-------
converter : callable
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
converter_type : type
``converter_type`` tracks the generic data type produced by the
converter function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
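    Examples
    --------
    A minimal sketch of typical use::
        converter, converter_type = convert_numpy(numpy.int64)
        converter(['1', '2', '3'])       # -> array([1, 2, 3])
        bool_conv, _ = convert_numpy(bool)
        bool_conv(['True', '0', '1'])    # -> array([ True, False,  True])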
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False')
| (svals == 'True')
| (svals == '0')
| (svals == '1')):
raise ValueError('bool input strings must be False, True, 0, 1, or ""')
vals = numpy.asarray(vals)
trues = (vals == 'True') | (vals == '1')
falses = (vals == 'False') | (vals == '0')
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False, True, 0, 1, or ""')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
    # User-defined converters which get set in ascii.ui if a ``converters`` kwarg
# is supplied.
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type)"""
# Allow specifying a single converter instead of a list of converters.
# The input `converters` must be a ``type`` value that can init np.dtype.
try:
# Don't allow list-like things that dtype accepts
assert type(converters) is type
converters = [numpy.dtype(converters)]
except (AssertionError, TypeError):
pass
converters_out = []
try:
for converter in converters:
try:
converter_func, converter_type = converter
except TypeError as err:
if str(err).startswith('cannot unpack'):
converter_func, converter_type = convert_numpy(converter)
else:
raise
if not issubclass(converter_type, NoType):
raise ValueError('converter_type must be a subclass of NoType')
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError) as err:
raise ValueError('Error: invalid format for converters, see '
f'documentation\n{converters}: {err}')
return converters_out
def _convert_vals(self, cols):
for col in cols:
for key, converters in self.converters.items():
if fnmatch.fnmatch(col.name, key):
break
else:
if col.dtype is not None:
converters = [convert_numpy(col.dtype)]
else:
converters = self.default_converters
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
            # value of ``last_err`` will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = 'no converters defined'
while not hasattr(col, 'data'):
# Try converters, popping the unsuccessful ones from the list.
# If there are no converters left here then fail.
if not col.converters:
raise ValueError(f'Column {col.name} failed to convert: {last_err}')
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError('converter type does not match column type')
try:
col.data = converter_func(col.str_vals)
col.type = converter_type
except (TypeError, ValueError) as err:
col.converters.pop(0)
last_err = err
except OverflowError as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
warnings.warn(
"OverflowError converting to {} in column {}, reverting to String."
.format(converter_type.__name__, col.name), AstropyWarning)
                    col.converters.insert(0, convert_numpy(str))
last_err = err
def _deduplicate_names(names):
"""Ensure there are no duplicates in ``names``
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
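    Illustrative example::
        _deduplicate_names(['a', 'b', 'a', 'a'])
        # -> ['a', 'b', 'a_1', 'a_2']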
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + '_'
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(int),
convert_numpy(float),
convert_numpy(str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, 'mask') and numpy.any(x.mask)
else x.data for x in cols]
out = Table(t_cols, names=[x.name for x in cols], meta=meta['table'])
for col, out_col in zip(cols, out.columns.values()):
for attr in ('format', 'unit', 'description'):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, 'meta'):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get('_format_name')
if format is None:
return
fast = dct.get('_fast')
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ['ascii.' + format] + dct.get('_io_registry_format_aliases', [])
if dct.get('_io_registry_suffix'):
func = functools.partial(connect.io_identify, dct['_io_registry_suffix'])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(READ_DOCSTRING).strip() + '\n\n'
+ header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get('_io_registry_can_write', True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(WRITE_DOCSTRING).strip() + '\n\n'
+ header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table or BaseHeader.
For the latter this relies on BaseHeader implementing ``colnames``,
``rename_column``, and ``remove_columns``.
Parameters
----------
table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader`
Input table or BaseHeader subclass instance
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
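    Examples
    --------
    A minimal sketch, assuming a table ``t`` with columns ``a``, ``b``, ``c``::
        _apply_include_exclude_names(t, names=['x', 'y', 'z'],
                                     include_names=['x', 'y'],
                                     exclude_names=['y'])
        # t.colnames is now ['x']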
"""
def rename_columns(table, names):
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = 'x' * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
if names is not None:
rename_columns(table, names)
else:
colnames_uniq = _deduplicate_names(table.colnames)
if colnames_uniq != list(table.colnames):
rename_columns(table, colnames_uniq)
names_set = set(table.colnames)
if include_names is not None:
names_set.intersection_update(include_names)
if exclude_names is not None:
names_set.difference_update(exclude_names)
if names_set != set(table.colnames):
remove_names = set(table.colnames) - names_set
table.remove_columns(remove_names)
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
# Max column dimension that writer supports for this format. Exceptions
# include ECSV (no limit) and HTML (max_ndim=2).
max_ndim = 1
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
        # need to know about header (e.g. for fixed-width tables where widths are spec'd in header).
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(),
cols=OrderedDict())
def _check_multidim_table(self, table):
"""Check that the dimensions of columns in ``table`` are acceptable.
The reader class attribute ``max_ndim`` defines the maximum dimension of
columns that can be written using this format. The base value is ``1``,
corresponding to normal scalar columns with just a length.
Parameters
----------
table : `~astropy.table.Table`
Input table.
Raises
------
ValueError
If any column exceeds the number of allowed dimensions
"""
_check_multidim_table(table, self.max_ndim)
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file-like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
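        Examples
        --------
        A minimal sketch using the default reader (illustrative)::
            from astropy.io import ascii
            reader = ascii.get_reader()
            dat = reader.read(['a b c', '1 2 3'])   # Table with columns a, b, c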
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is a "file" if it is a string
        # without the OS-specific newline character.
with suppress(TypeError):
# Strings only
if os.linesep not in table + '':
self.data.table_name = os.path.basename(table)
# If one of the newline chars is set as field delimiter, only
# accept the other one as line splitter
if self.header.splitter.delimiter == '\n':
newline = '\r'
elif self.header.splitter.delimiter == '\r':
newline = '\n'
else:
newline = None
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table, newline=newline)
        # Set self.data.data_lines to a slice of lines containing the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = ('Number of header columns ({}) inconsistent with'
' data columns ({}) at data line {}\n'
'Header values: {}\n'
'Data values: {}'.format(
n_cols, len(str_vals), i,
[x.name for x in cols], str_vals))
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, 'table_meta'):
self.meta['table'].update(self.header.table_meta)
_apply_include_exclude_names(self.header, self.names,
self.include_names, self.exclude_names)
table = self.outputter(self.header.cols, self.meta)
self.cols = self.header.cols
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
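        Examples
        --------
        A sketch of overriding this in a subclass of a concrete reader such as
        ``Basic`` so that mismatched rows are skipped (illustrative)::
            class ForgivingBasic(Basic):
                def inconsistent_handler(self, str_vals, ncols):
                    # Returning None skips the row entirely
                    return str_vals if len(str_vals) == ncols else None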
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp"""
if not hasattr(self, 'lines'):
raise ValueError('Table must be read prior to accessing the header comment lines')
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Check that table column dimensions are supported by this format class.
# Most formats support only 1-d columns, but some like ECSV support N-d.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined
with the subsequent line. Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = '\\'
replace_char = ' '
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append(''.join(parts))
parts = []
return outlines
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings"""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (self.escapechar is None
or lastchar != self.escapechar):
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
extra_reader_pars = ('Reader', 'Inputter', 'Outputter',
'delimiter', 'comment', 'quotechar', 'header_start',
'data_start', 'data_end', 'converters', 'encoding',
'data_Splitter', 'header_Splitter',
'names', 'include_names', 'exclude_names', 'strict_names',
'fill_values', 'fill_include_names', 'fill_exclude_names')
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs['Inputter'] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if 'fast_reader' in kwargs:
if kwargs['fast_reader']['enable'] == 'force':
raise ParameterError('fast_reader required with '
'{}, but this is not a fast C reader: {}'
.format(kwargs['fast_reader'], Reader))
else:
del kwargs['fast_reader'] # Otherwise ignore fast_reader parameter
reader_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_reader_pars)
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
    # Issue #855 suggested setting data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
# csv.reader is hard-coded to recognise either '\r' or '\n' as end-of-line,
# therefore DefaultSplitter cannot handle these as delimiters.
if 'delimiter' in kwargs:
if kwargs['delimiter'] in ('\n', '\r', '\r\n'):
reader.header.splitter = BaseSplitter()
reader.data.splitter = BaseSplitter()
reader.header.splitter.delimiter = kwargs['delimiter']
reader.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
reader.header.comment = kwargs['comment']
reader.data.comment = kwargs['comment']
if 'quotechar' in kwargs:
reader.header.splitter.quotechar = kwargs['quotechar']
reader.data.splitter.quotechar = kwargs['quotechar']
if 'data_start' in kwargs:
reader.data.start_line = kwargs['data_start']
if 'data_end' in kwargs:
reader.data.end_line = kwargs['data_end']
if 'header_start' in kwargs:
if (reader.header.start_line is not None):
reader.header.start_line = kwargs['header_start']
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as absolute number and not relative to header_start.
# So, ignore this Reader here.
if (('data_start' not in kwargs) and (default_header_length is not None)
and reader._format_name not in ['fixed_width_two_line', 'commented_header']):
reader.data.start_line = reader.header.start_line + default_header_length
elif kwargs['header_start'] is not None:
# User trying to set a None header start to some value other than None
raise ValueError('header_start cannot be modified for this Reader')
if 'converters' in kwargs:
reader.outputter.converters = kwargs['converters']
if 'data_Splitter' in kwargs:
reader.data.splitter = kwargs['data_Splitter']()
if 'header_Splitter' in kwargs:
reader.header.splitter = kwargs['header_Splitter']()
if 'names' in kwargs:
reader.names = kwargs['names']
if None in reader.names:
raise TypeError('Cannot have None for column name')
if len(set(reader.names)) != len(reader.names):
raise ValueError('Duplicate column names')
if 'include_names' in kwargs:
reader.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
reader.exclude_names = kwargs['exclude_names']
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if 'strict_names' in kwargs:
reader.strict_names = kwargs['strict_names']
if 'fill_values' in kwargs:
reader.data.fill_values = kwargs['fill_values']
if 'fill_include_names' in kwargs:
reader.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
reader.data.fill_exclude_names = kwargs['fill_exclude_names']
if 'encoding' in kwargs:
reader.encoding = kwargs['encoding']
reader.inputter.encoding = kwargs['encoding']
return reader
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'strip_whitespace',
'names', 'include_names', 'exclude_names',
'fill_values', 'fill_include_names',
'fill_exclude_names')
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module."""
from .fastbasic import FastBasic
    # A value of None for fill_values implies getting the default string
# representation of masked values (depending on the writer class), but the
# machinery expects a list. The easiest here is to just pop the value off,
# i.e. fill_values=None is the same as not providing it at all.
if 'fill_values' in kwargs and kwargs['fill_values'] is None:
del kwargs['fill_values']
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f'fast_{Writer._format_name}' in FAST_CLASSES:
# Switch to fast writer
kwargs['fast_writer'] = fast_writer
return FAST_CLASSES[f'fast_{Writer._format_name}'](**kwargs)
writer_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_writer_pars)
writer = Writer(**writer_kwargs)
if 'delimiter' in kwargs:
writer.header.splitter.delimiter = kwargs['delimiter']
writer.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
writer.header.write_comment = kwargs['comment']
writer.data.write_comment = kwargs['comment']
if 'quotechar' in kwargs:
writer.header.splitter.quotechar = kwargs['quotechar']
writer.data.splitter.quotechar = kwargs['quotechar']
if 'formats' in kwargs:
writer.data.formats = kwargs['formats']
if 'strip_whitespace' in kwargs:
if kwargs['strip_whitespace']:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller('strip', ' \t')
else:
writer.data.splitter.process_val = None
if 'names' in kwargs:
writer.header.names = kwargs['names']
if 'include_names' in kwargs:
writer.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
writer.exclude_names = kwargs['exclude_names']
if 'fill_values' in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs['fill_values'][1] + ''
kwargs['fill_values'] = [kwargs['fill_values']]
writer.data.fill_values = kwargs['fill_values'] + writer.data.fill_values
if 'fill_include_names' in kwargs:
writer.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
writer.data.fill_exclude_names = kwargs['fill_exclude_names']
return writer
|
fc2ba6203285105ca3df9986b662bf21f5c541e3c87a22e2f5d9f8f63adf991f | READ_DOCSTRING = """
Read the input ``table`` and return the table. Most of
the default behavior for various parameters is determined by the Reader
class.
See also:
- https://docs.astropy.org/en/stable/io/ascii/
- https://docs.astropy.org/en/stable/io/ascii/read.html
Parameters
----------
table : str, file-like, list, `pathlib.Path` object
        Input table as a file name, file-like object, list of strings,
single newline-separated string or `pathlib.Path` object.
guess : bool
Try to guess the table format. Defaults to None.
format : str, `~astropy.io.ascii.BaseReader`
Input table format
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dictionary of converters to specify output column dtypes. Each key in
the dictionary is a column name or else a name matching pattern
including wildcards. The value is either a data type such as ``int`` or
``np.float32``; a list of such types which is tried in order until a
successful conversion is achieved; or a list of converter tuples (see
the `~astropy.io.ascii.convert_numpy` function for details).
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fill_values : tuple, list of tuple
specification of fill values for bad or missing table values
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``)
fast_reader : bool, str or dict
Whether to use the C engine, can also be a dict with options which
defaults to `False`; parameters for options dict:
use_fast_converter: bool
enable faster but slightly imprecise floating point conversion method
parallel: bool or int
multiprocessing conversion using ``cpu_count()`` or ``'number'`` processes
exponent_style: str
One-character string defining the exponent or ``'Fortran'`` to auto-detect
Fortran-style scientific notation like ``'3.14159D+00'`` (``'E'``, ``'D'``, ``'Q'``),
            all case-insensitive; default ``'E'``; all others imply ``use_fast_converter``
chunk_size : int
If supplied with a value > 0 then read the table in chunks of
approximately ``chunk_size`` bytes. Default is reading table in one pass.
chunk_generator : bool
If True and ``chunk_size > 0`` then return an iterator that returns a
table for each chunk. The default is to return a single stacked table
for all the chunks.
encoding : str
        Encoding to use when reading the file (default ``None``).
Returns
-------
dat : `~astropy.table.Table` or <generator>
Output table
"""
# Specify allowed types for core read() keyword arguments. Each entry
# corresponds to the name of an argument and either a type (e.g. int) or a
# list of types. These get used in io.ascii.ui._validate_read_write_kwargs().
# - The commented-out kwargs are too flexible for a useful check
# - 'list-like' is a special case for an iterable that is not a string.
READ_KWARG_TYPES = {
# 'table'
'guess': bool,
# 'format'
# 'Reader'
# 'Inputter'
# 'Outputter'
'delimiter': str,
'comment': str,
'quotechar': str,
'header_start': int,
'data_start': (int, str), # CDS allows 'guess'
'data_end': int,
'converters': dict,
# 'data_Splitter'
# 'header_Splitter'
'names': 'list-like',
'include_names': 'list-like',
'exclude_names': 'list-like',
'fill_values': 'list-like',
'fill_include_names': 'list-like',
'fill_exclude_names': 'list-like',
'fast_reader': (bool, str, dict),
'encoding': str,
}
WRITE_DOCSTRING = """
Write the input ``table`` to ``filename``. Most of the default behavior
for various parameters is determined by the Writer class.
See also:
- https://docs.astropy.org/en/stable/io/ascii/
- https://docs.astropy.org/en/stable/io/ascii/write.html
Parameters
----------
table : `~astropy.io.ascii.BaseReader`, array-like, str, file-like, list
Input table as a Reader object, Numpy struct array, file name,
file-like object, list of strings, or single newline-separated string.
output : str, file-like
        Output [filename, file-like object]. Defaults to ``sys.stdout``.
format : str
Output table format. Defaults to 'basic'.
delimiter : str
Column delimiter string
comment : str, bool
String defining a comment line in table. If `False` then comments
are not written out.
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool, str
Whether to use the fast Cython writer. Can be `True` (use fast writer
if available), `False` (do not use fast writer), or ``'force'`` (use
fast writer and fail if not available, mostly for testing).
overwrite : bool
If ``overwrite=False`` (default) and the file exists, then an OSError
is raised. This parameter is ignored when the ``output`` arg is not a
string (e.g., a file object).
"""
# Specify allowed types for core write() keyword arguments. Each entry
# corresponds to the name of an argument and either a type (e.g. int) or a
# list of types. These get used in io.ascii.ui._validate_read_write_kwargs().
# - The commented-out kwargs are too flexible for a useful check
# - 'list-like' is a special case for an iterable that is not a string.
WRITE_KWARG_TYPES = {
# 'table'
# 'output'
'format': str,
'delimiter': str,
'comment': (str, bool),
'quotechar': str,
'header_start': int,
'formats': dict,
'strip_whitespace': (bool),
'names': 'list-like',
'include_names': 'list-like',
'exclude_names': 'list-like',
'fast_writer': (bool, str),
'overwrite': (bool),
}
|
7bfc2c1c60e948d566559b188c4f4f981af0ba91780446e479daa389c154afdb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import re
import os
import sys
import copy
import time
import warnings
import contextlib
import collections
from io import StringIO
import numpy as np
from . import core
from . import basic
from . import cds
from . import mrt
from . import daophot
from . import ecsv
from . import sextractor
from . import ipac
from . import latex
from . import html
from . import rst
from . import fastbasic
from . import cparser
from . import fixedwidth
from .docs import READ_KWARG_TYPES, WRITE_KWARG_TYPES
from astropy.table import Table, MaskedColumn
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
_read_trace = []
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
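    Illustrative examples::
        _probably_html('<!DOCTYPE HTML><table>...')   # -> True
        _probably_html('just some plain text')        # -> False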
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
table = table[:i + 1]
break
table = os.linesep.join(table)
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(r'( http[s]? | ftp | file ) :// .+ \.htm[l]?$', table,
re.IGNORECASE | re.VERBOSE):
return True
# Filename ending in .htm or .html which exists
if (re.search(r'\.htm[l]?$', table[-5:], re.IGNORECASE) and
os.path.exists(os.path.expanduser(table))):
return True
# Table starts with HTML document type declaration
if re.match(r'\s* <! \s* DOCTYPE \s* HTML', table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(re.search(fr'< \s* {element} [^>]* >', table, re.IGNORECASE | re.VERBOSE)
for element in ('table', 'tr', 'td')):
return True
return False
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read()
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
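    Examples
    --------
    Disable format guessing for all subsequent reads (illustrative)::
        from astropy.io import ascii
        ascii.set_guess(False)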
"""
global _GUESS
_GUESS = guess
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dict of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : tuple, list of tuple
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader['enable'] == 'force':
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError(f'Cannot supply both format and {label} keywords')
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError('ASCII format {!r} not in allowed list {}'
.format(format, sorted(core.FORMAT_CLASSES)))
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
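    Illustrative examples::
        _get_fast_reader_dict({})                      # -> {'enable': True}
        _get_fast_reader_dict({'fast_reader': False})  # -> {'enable': False}
        _get_fast_reader_dict({'fast_reader': {'parallel': 4}})
        # -> {'parallel': 4, 'enable': 'force'}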
"""
fast_reader = copy.deepcopy(kwargs.get('fast_reader', True))
if isinstance(fast_reader, dict):
fast_reader.setdefault('enable', 'force')
else:
fast_reader = {'enable': fast_reader}
return fast_reader
def _validate_read_write_kwargs(read_write, **kwargs):
"""Validate types of keyword arg inputs to read() or write()."""
def is_ducktype(val, cls):
"""Check if ``val`` is an instance of ``cls`` or "seems" like one:
        ``cls(val) == val`` does not raise an exception and is `True`. In
this way you can pass in ``np.int16(2)`` and have that count as `int`.
This has a special-case of ``cls`` being 'list-like', meaning it is
an iterable but not a string.
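        Illustrative examples (``np`` is the numpy import above)::
            is_ducktype(np.int16(2), int)      # -> True
            is_ducktype([1, 2], 'list-like')   # -> True
            is_ducktype('abc', 'list-like')    # -> False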
"""
if cls == 'list-like':
ok = (not isinstance(val, str)
and isinstance(val, collections.abc.Iterable))
else:
ok = isinstance(val, cls)
if not ok:
            # See if ``val`` walks and quacks like a ``cls``.
try:
new_val = cls(val)
assert new_val == val
except Exception:
ok = False
else:
ok = True
return ok
kwarg_types = READ_KWARG_TYPES if read_write == 'read' else WRITE_KWARG_TYPES
for arg, val in kwargs.items():
# Kwarg type checking is opt-in, so kwargs not in the list are considered OK.
# This reflects that some readers allow additional arguments that may not
        # be well-specified, e.g. ``__init__(self, **kwargs)`` is an option.
if arg not in kwarg_types or val is None:
continue
# Single type or tuple of types for this arg (like isinstance())
types = kwarg_types[arg]
err_msg = (f"{read_write}() argument '{arg}' must be a "
f"{types} object, got {type(val)} instead")
# Force `types` to be a tuple for the any() check below
if not isinstance(types, tuple):
types = (types,)
if not any(is_ducktype(val, cls) for cls in types):
raise TypeError(err_msg)
def _expand_user_if_path(argument):
if isinstance(argument, (str, bytes, os.PathLike)):
# For the `read()` method, a `str` input can be either a file path or
# the table data itself. File names for io.ascii cannot have newlines
# in them and io.ascii does not accept table data as `bytes`, so we can
# attempt to detect data strings like this.
is_str_data = (isinstance(argument, str)
and ('\n' in argument or '\r' in argument))
if not is_str_data:
# Remain conservative in expanding the presumed-path
ex_user = os.path.expanduser(argument)
if os.path.exists(ex_user):
argument = ex_user
return argument
def read(table, guess=None, **kwargs):
    # This is the final output from reading. Static analysis indicates the reading
# logic (which is indeed complex) might not define `dat`, thus do so here.
dat = None
# Docstring defined below
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
_validate_read_write_kwargs('read', **kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs['fast_reader'] = fast_reader
if fast_reader['enable'] and fast_reader.get('chunk_size'):
return _read_in_chunks(table, **kwargs)
if 'fill_values' not in kwargs:
kwargs['fill_values'] = [('', '0')]
# If an Outputter is supplied in kwargs that will take precedence.
if 'Outputter' in kwargs: # user specified Outputter, not supported for fast reading
fast_reader['enable'] = False
format = kwargs.get('format')
# Dictionary arguments are passed by reference per default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs['fast_reader'] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader')
if Reader is not None:
new_kwargs['Reader'] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if 'format' in new_kwargs:
del new_kwargs['format']
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs['guess_html'] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. If a `readme` arg was passed that implies CDS format, in
# which case the original `table` as the data filename must be left
# intact.
if 'readme' not in new_kwargs:
encoding = kwargs.get('encoding')
try:
table = _expand_user_if_path(table)
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r'[\r\n]', table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs['guess_html']:
new_kwargs['guess_html'] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
table = _expand_user_if_path(table)
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader['enable'] and f'fast_{format}' in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append({'kwargs': copy.deepcopy(fast_kwargs),
'Reader': fast_reader_rdr.__class__,
'status': 'Success with fast reader (no guessing)'})
except (core.ParameterError, cparser.CParserError, UnicodeEncodeError) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader['enable'] == 'force':
raise core.InconsistentTableError(
f'fast reader {fast_reader_rdr.__class__} exception: {err}')
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with slow reader after failing'
' with fast (no guessing)'})
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with specified Reader class '
'(no guessing)'})
# Static analysis (pyright) indicates `dat` might be left undefined, so just
# to be sure define it at the beginning and check here.
if dat is None:
raise RuntimeError('read() function failed due to code logic error, '
'please report this bug on github')
return dat
read.__doc__ = core.READ_DOCSTRING
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
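
    Examples
    --------
    This function is the internal machinery behind format guessing in
    `~astropy.io.ascii.read`; a hedged sketch of the public call that
    exercises it (the table content is illustrative)::

        from astropy.io import ascii
        dat = ascii.read(['a b', '1 2'])  # cycles through the guess list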
"""
    # Keep a trace of the kwargs for all failed guesses.
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (fast_reader['enable'] and format is not None and f'fast_{format}' in
core.FAST_CLASSES):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get('fast_reader')
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (fast_reader['enable'] is False
and guess_kwargs['Reader'] in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: reader only available in fast version',
'dt': f'{0.0:.3f} ms'})
continue
# If user required a fast reader then skip all non-fast readers
if (fast_reader['enable'] == 'force'
and guess_kwargs['Reader'] not in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: no fast version of reader available',
'dt': f'{0.0:.3f} ms'})
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
            # Do guess_kwargs.update(read_kwargs) except that if guess_kwargs
            # has a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (core.InconsistentTableError, ValueError, TypeError,
AttributeError, core.OptionalTableImportError,
core.ParameterError, cparser.CParserError)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
# If guessing will try all Readers then use strict req'ts on column names
if 'Reader' not in read_kwargs:
guess_kwargs['strict_names'] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': reader.__class__,
'status': 'Success (guessing)',
'dt': f'{(time.time() - t0) * 1000:.3f} ms'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'status': f'{err.__class__.__name__}: {str(err)}',
'dt': f'{(time.time() - t0) * 1000:.3f} ms'})
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'Reader': reader.__class__,
'status': 'Success with original kwargs without strict_names '
'(guessing)'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'status': f'{err.__class__.__name__}: {str(err)}'})
failed_kwargs.append(read_kwargs)
lines = ['\nERROR: Unable to guess table format with the guesses listed below:']
for kwargs in failed_kwargs:
        sorted_keys = sorted(x for x in kwargs
                             if x not in ('Reader', 'Outputter'))
reader_repr = repr(kwargs.get('Reader', basic.Basic))
keys_vals = ['Reader:' + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend([f'{key}: {val!r}' for key, val in kwargs_sorted])
lines.append(' '.join(keys_vals))
msg = ['',
'************************************************************************',
'** ERROR: Unable to guess table format with the guesses listed above. **',
'** **',
'** To figure out why the table did not read, use guess=False and **',
'** fast_reader=False, along with any appropriate arguments to read(). **',
'** In particular specify the format and any known attributes like the **',
'** delimiter. **',
'************************************************************************']
lines.extend(msg)
raise core.InconsistentTableError('\n'.join(lines))
def _get_guess_kwargs_list(read_kwargs):
"""
Get the full list of reader keyword argument dicts that are the basis
for the format guessing process. The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop('guess_html', None):
guess_kwargs_list.append(dict(Reader=html.HTML))
    # Start with ECSV because an ECSV file would otherwise be read by Basic.
    # The ECSV format has very specific header requirements and fails out
    # quickly when they are not met.
guess_kwargs_list.append(dict(Reader=ecsv.Ecsv))
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (fixedwidth.FixedWidthTwoLine, rst.RST,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastRdb, basic.Rdb,
fastbasic.FastTab, basic.Tab,
cds.Cds, mrt.Mrt, daophot.Daophot, sextractor.SExtractor,
ipac.Ipac, latex.Latex, latex.AASTex):
guess_kwargs_list.append(dict(Reader=reader))
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (fastbasic.FastCommentedHeader, basic.CommentedHeader,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastNoHeader, basic.NoHeader):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(dict(
Reader=Reader, delimiter=delimiter, quotechar=quotechar))
return guess_kwargs_list
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
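
    A hedged usage sketch via the public reader (the file name and chunk
    size are illustrative; ``chunk_size`` is passed through the
    ``fast_reader`` dict)::

        from astropy.io import ascii
        tbl = ascii.read('big_table.csv', format='csv',
                         fast_reader={'chunk_size': 1000000})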
"""
fast_reader = kwargs['fast_reader']
chunk_size = fast_reader.pop('chunk_size')
chunk_generator = fast_reader.pop('chunk_generator', False)
fast_reader['parallel'] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ('S', 'U')
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta,
copy=False)
return out
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
for input file-like object, see #6460"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if (isinstance(table, str) and ('\n' in table or '\r' in table)):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, 'read') and hasattr(table, 'seek'):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs['fast_reader']['return_header_chars'] = True
header = '' # Table header (up to start of data)
prev_chunk_chars = '' # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get('encoding')) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r'\S', chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == '\n':
break
else:
raise ValueError('no newline found in chunk (chunk_size too small?)')
            # Prepend the header (plus any leftover characters from the
            # previous chunk) to the part of this chunk up to and including
            # the last newline. Concatenate the small strings first.
complete_chunk = (header + prev_chunk_chars) + chunk[:idx + 1]
prev_chunk_chars = chunk[idx + 1:]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop('__ascii_fast_reader_header_chars__')
first_chunk = False
yield tbl
if final_chunk:
break
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'names', 'include_names', 'exclude_names', 'strip_whitespace')
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
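
    Examples
    --------
    A minimal sketch (the delimiter value is illustrative)::

        from astropy.io import ascii
        writer = ascii.get_writer(delimiter='|')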
"""
if Writer is None:
Writer = basic.Basic
if 'strip_whitespace' not in kwargs:
kwargs['strip_whitespace'] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader))
and not isinstance(kwargs.get('comment', ''), str)):
raise ValueError("for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing.")
return writer
def write(table, output=None, format=None, Writer=None, fast_writer=True, *,
overwrite=False, **kwargs):
# Docstring inserted below
_validate_read_write_kwargs('write', format=format, fast_writer=fast_writer,
overwrite=overwrite, **kwargs)
if isinstance(output, (str, bytes, os.PathLike)):
output = os.path.expanduser(output)
if not overwrite and os.path.lexists(output):
raise OSError(NOT_OVERWRITING_MSG.format(output))
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get('names')
if isinstance(table, Table):
        # While we are only going to read data from columns, we may need
        # to adjust info attributes such as format, so we make a shallow copy.
table = table.__class__(table, names=names, copy=False)
else:
# Otherwise, create a table from the input.
table = Table(table, names=names, copy=False)
table0 = table[:0].copy()
core._apply_include_exclude_names(table0, kwargs.get('names'),
kwargs.get('include_names'), kwargs.get('exclude_names'))
diff_format_with_names = set(kwargs.get('formats', [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
'The key(s) {} specified in the formats argument do not match a column name.'
.format(diff_format_with_names), AstropyWarning)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, 'Writer')
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
if not hasattr(output, 'write'):
# NOTE: we need to specify newline='', otherwise the default
# behavior is for Python to translate \r\n (which we write because
# of os.linesep) into \r\r\n. Specifying newline='' disables any
# auto-translation.
output = open(output, 'w', newline='')
output.write(outstr)
output.write(os.linesep)
output.close()
else:
output.write(outstr)
output.write(os.linesep)
write.__doc__ = core.WRITE_DOCSTRING
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dict
Ordered list of format guesses and status
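
    Examples
    --------
    A hedged sketch (the input table and the printed statuses are
    illustrative)::

        from astropy.io import ascii
        dat = ascii.read(['a b', '1 2'])
        for entry in ascii.get_read_trace():
            print(entry['status'])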
"""
return copy.deepcopy(_read_trace)
|
a327560d615f6624ae148793fbffb1b5ec598100a2f65ecf153e1bb08be3db5b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing HDF5 tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n'
META_KEY = '__table_column_meta__'
__all__ = ['read_table_hdf5', 'write_table_hdf5']
def meta_path(path):
return path + '.' + META_KEY
def _find_all_structured_arrays(handle):
"""
Find all structured arrays in an HDF5 file
"""
import h5py
structured_arrays = []
def append_structured_arrays(name, obj):
if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V':
structured_arrays.append(name)
handle.visititems(append_structured_arrays)
return structured_arrays
def is_hdf5(origin, filepath, fileobj, *args, **kwargs):
if fileobj is not None:
loc = fileobj.tell()
try:
signature = fileobj.read(8)
finally:
fileobj.seek(loc)
return signature == HDF5_SIGNATURE
elif filepath is not None:
return filepath.endswith(('.hdf5', '.h5'))
try:
import h5py
except ImportError:
return False
else:
return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset))
def read_table_hdf5(input, path=None, character_as_bytes=True):
"""
Read a Table object from an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one
table is present in the HDF5 file or group, the first table is read in and
a warning is displayed.
Parameters
----------
    input : str or :class:`h5py.File` or :class:`h5py.Group` or :class:`h5py.Dataset`
        If a string, the filename to read the table from. If an h5py object,
        either the file or the group object to read the table from.
path : str
The path from which to read the table inside the HDF5 file.
This should be relative to the input file or group.
character_as_bytes : bool
If `True` then Table columns are left as bytes.
If `False` then Table columns are converted to unicode.
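
    Examples
    --------
    A minimal sketch (assumes ``h5py`` is installed; the file name and path
    are hypothetical)::

        from astropy.io.misc.hdf5 import read_table_hdf5
        t = read_table_hdf5('observations.hdf5', path='data/table1')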
"""
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
    # This function is recursive: for a filename or file-like input it
    # reopens the file and calls itself, and along the way the `input`
    # variable is reassigned. Save its original value here so the serialized
    # metadata can be located at the end, once `input` is a Dataset.
input_save = input
if isinstance(input, (h5py.File, h5py.Group)):
# If a path was specified, follow the path
if path is not None:
try:
input = input[path]
except (KeyError, ValueError):
raise OSError(f"Path {path} does not exist")
# `input` is now either a group or a dataset. If it is a group, we
# will search for all structured arrays inside the group, and if there
# is one we can proceed otherwise an error is raised. If it is a
# dataset, we just proceed with the reading.
if isinstance(input, h5py.Group):
# Find all structured arrays in group
arrays = _find_all_structured_arrays(input)
if len(arrays) == 0:
raise ValueError(f"no table found in HDF5 group {path}")
            else:
path = arrays[0] if path is None else path + '/' + arrays[0]
if len(arrays) > 1:
warnings.warn("path= was not specified but multiple tables"
" are present, reading in first available"
" table (path={})".format(path),
AstropyUserWarning)
                return read_table_hdf5(input, path=path,
                                       character_as_bytes=character_as_bytes)
elif not isinstance(input, h5py.Dataset):
# If a file object was passed, then we need to extract the filename
# because h5py cannot properly read in file objects.
if hasattr(input, 'read'):
try:
input = input.name
except AttributeError:
raise TypeError("h5py can only open regular files")
# Open the file for reading, and recursively call read_table_hdf5 with
# the file object and the path.
f = h5py.File(input, 'r')
try:
return read_table_hdf5(f, path=path, character_as_bytes=character_as_bytes)
finally:
f.close()
# If we are here, `input` should be a Dataset object, which we can now
# convert to a Table.
# Create a Table object
from astropy.table import Table, meta, serialize
table = Table(np.array(input))
# Read the meta-data from the file. For back-compatibility, we can read
# the old file format where the serialized metadata were saved in the
# attributes of the HDF5 dataset.
    # In the new format, instead, metadata are stored in a new dataset in the
    # same file. This was introduced in Astropy 3.0.
old_version_meta = META_KEY in input.attrs
new_version_meta = path is not None and meta_path(path) in input_save
if old_version_meta or new_version_meta:
if new_version_meta:
header = meta.get_header_from_yaml(
h.decode('utf-8') for h in input_save[meta_path(path)])
else:
# Must be old_version_meta is True. if (A or B) and not A then B is True
header = meta.get_header_from_yaml(
h.decode('utf-8') for h in input.attrs[META_KEY])
if 'meta' in list(header.keys()):
table.meta = header['meta']
header_cols = dict((x['name'], x) for x in header['datatype'])
for col in table.columns.values():
for attr in ('description', 'format', 'unit', 'meta'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
table = serialize._construct_mixins_from_columns(table)
else:
# Read the meta-data from the file
table.meta.update(input.attrs)
if not character_as_bytes:
table.convert_bytestring_to_unicode()
return table
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.table import serialize
from astropy import units as u
from astropy.utils.data_info import serialize_context_as
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table.
with serialize_context_as('hdf5'):
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
def write_table_hdf5(table, output, path=None, compression=False,
append=False, overwrite=False, serialize_meta=False,
**create_dataset_kwargs):
"""
Write a Table object to an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py.File` or :class:`h5py.Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to ``__astropy_table__``.
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
serialize_meta : bool
Whether to serialize rich table meta-data when writing the HDF5 file, in
particular such data required to write and read back mixin columns like
``Time``, ``SkyCoord``, or ``Quantity`` to the file.
**create_dataset_kwargs
Additional keyword arguments are passed to
``h5py.File.create_dataset()`` or ``h5py.Group.create_dataset()``.
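
    Examples
    --------
    A minimal sketch (assumes ``h5py`` is installed; the file name and path
    are hypothetical)::

        from astropy.table import Table
        from astropy.io.misc.hdf5 import write_table_hdf5

        t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
        write_table_hdf5(t, 'output.hdf5', path='group/data',
                         serialize_meta=True, overwrite=True)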
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
    if path is None:
        # '__astropy_table__' is just an arbitrary, hardcoded default path.
        path = '__astropy_table__'
elif path.endswith('/'):
raise ValueError("table path should end with table name, not /")
if '/' in path:
group, name = path.rsplit('/', 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if len(list(output.keys())) > 0 and name == '__astropy_table__':
raise ValueError("table path should always be set via the "
"path= argument when writing to existing "
"files")
elif name == '__astropy_table__':
warnings.warn("table path was not set via the path= argument; "
"using default path {}".format(path))
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
            if overwrite:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Open the file for appending or writing
f = h5py.File(output, 'a' if append else 'w')
# Recursively call the write function
try:
return write_table_hdf5(table, f, path=path,
compression=compression, append=append,
overwrite=overwrite,
serialize_meta=serialize_meta)
finally:
f.close()
else:
raise TypeError('output should be a string or an h5py File or '
'Group object')
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
            if serialize_meta and meta_path(name) in output_group:
                del output_group[meta_path(name)]
else:
raise OSError(f"Table {path} already exists")
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
    # A table containing numpy unicode columns can't be written directly to
    # HDF5, so make a shallow copy of the table with those columns converted
    # to bytestrings; this copy is what gets written.
if any(col.info.dtype.kind == 'U' for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ('unit', 'format', 'description', 'meta'):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn("table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning)
# Write the table to the file
if compression:
if compression is True:
compression = 'gzip'
dset = output_group.create_dataset(name, data=table.as_array(),
compression=compression,
**create_dataset_kwargs)
else:
dset = output_group.create_dataset(name, data=table.as_array(),
**create_dataset_kwargs)
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = np.array([h.encode('utf-8') for h in header_yaml])
output_group.create_dataset(meta_path(name),
data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn("Attribute `{}` of type {} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)".format(key, type(val)),
AstropyUserWarning)
def register_hdf5():
"""
Register HDF5 with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader('hdf5', Table, read_table_hdf5)
io_registry.register_writer('hdf5', Table, write_table_hdf5)
io_registry.register_identifier('hdf5', Table, is_hdf5)
|
8378a1ea154b6e22359ca05b56d0dc389b0d929044babc7bc42f5ac240f1e3d7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Mixin columns for use in ascii/tests/test_ecsv.py, fits/tests/test_connect.py,
and misc/tests/test_hdf5.py
"""
import numpy as np
from astropy import coordinates, table, time, units as u
el = coordinates.EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sr = coordinates.SphericalRepresentation(
[0, 1]*u.deg, [2, 3]*u.deg, 1*u.kpc)
cr = coordinates.CartesianRepresentation(
[0, 1]*u.pc, [4, 5]*u.pc, [8, 6]*u.pc)
sd = coordinates.SphericalCosLatDifferential(
[0, 1]*u.mas/u.yr, [0, 1]*u.mas/u.yr, 10*u.km/u.s)
srd = coordinates.SphericalRepresentation(
sr, differentials=sd)
sc = coordinates.SkyCoord([1, 2], [3, 4], unit='deg,deg',
frame='fk4', obstime='J1990.5')
scd = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5'] * 2)
scdc = scd.copy()
scdc.representation_type = 'cartesian'
scpm = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
pm_ra_cosdec=[7, 8]*u.mas/u.yr, pm_dec=[9, 10]*u.mas/u.yr)
scpmrv = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
pm_ra_cosdec=[7, 8]*u.mas/u.yr, pm_dec=[9, 10]*u.mas/u.yr,
radial_velocity=[11, 12]*u.km/u.s)
scrv = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
radial_velocity=[11, 12]*u.km/u.s)
tm = time.Time([51000.5, 51001.5], format='mjd', scale='tai',
precision=5, location=el[0])
tm2 = time.Time(tm, precision=3, format='iso')
tm3 = time.Time(tm, location=el)
tm3.info.serialize_method['ecsv'] = 'jd1_jd2'
obj = table.Column([{'a': 1}, {'b': [2]}], dtype='object')
su = table.Column([(1, (1.5, 1.6)),
(2, (2.5, 2.6))],
name='su',
dtype=[('i', np.int64),
('f', [('p1', np.float64), ('p0', np.float64)])])
su2 = table.Column([(['snake', 'c'], [1.6, 1.5]),
(['eal', 'a'], [2.5, 2.6])],
dtype=[('name', 'U5', (2,)), ('f', 'f8', (2,))])
# NOTE: for testing, the name of the column "x" for the
# Quantity is important since it tests the fix for #10215
# (namespace clash, where "x" clashes with "el.x").
mixin_cols = {
'tm': tm,
'tm2': tm2,
'tm3': tm3,
'dt': time.TimeDelta([1, 2] * u.day),
'sc': sc,
'scd': scd,
'scdc': scdc,
'scpm': scpm,
'scpmrv': scpmrv,
'scrv': scrv,
'x': [1, 2] * u.m,
'qdb': [10, 20] * u.dB(u.mW),
'qdex': [4.5, 5.5] * u.dex(u.cm / u.s**2),
'qmag': [21, 22] * u.ABmag,
'lat': coordinates.Latitude([1, 2] * u.deg),
'lon': coordinates.Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),
'ang': coordinates.Angle([1, 2] * u.deg),
'el': el,
'sr': sr,
'cr': cr,
'sd': sd,
'srd': srd,
'nd': table.NdarrayMixin([1, 2]),
'obj': obj,
'su': su,
'su2': su2,
}
time_attrs = ['value', 'shape', 'format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location']
compare_attrs = {
'tm': time_attrs,
'tm2': time_attrs,
'tm3': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation_type', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation_type', 'frame.name'],
'scdc': ['x', 'y', 'z', 'representation_type', 'frame.name'],
'scpm': ['ra', 'dec', 'distance', 'pm_ra_cosdec', 'pm_dec',
'representation_type', 'frame.name'],
'scpmrv': ['ra', 'dec', 'distance', 'pm_ra_cosdec', 'pm_dec',
'radial_velocity', 'representation_type', 'frame.name'],
'scrv': ['ra', 'dec', 'distance', 'radial_velocity',
'representation_type', 'frame.name'],
'x': ['value', 'unit'],
'qdb': ['value', 'unit'],
'qdex': ['value', 'unit'],
'qmag': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['data'],
'sr': ['lon', 'lat', 'distance'],
'cr': ['x', 'y', 'z'],
'sd': ['d_lon_coslat', 'd_lat', 'd_distance'],
'srd': ['lon', 'lat', 'distance', 'differentials.s.d_lon_coslat',
'differentials.s.d_lat', 'differentials.s.d_distance'],
'obj': [],
'su': ['i', 'f.p0', 'f.p1'],
'su2': ['name', 'f'],
}
non_trivial_names = {
'cr': ['cr.x', 'cr.y', 'cr.z'],
'dt': ['dt.jd1', 'dt.jd2'],
'el': ['el.x', 'el.y', 'el.z'],
'sc': ['sc.ra', 'sc.dec'],
'scd': ['scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime.jd1', 'scd.obstime.jd2'],
'scdc': ['scdc.x', 'scdc.y', 'scdc.z',
'scdc.obstime.jd1', 'scdc.obstime.jd2'],
'scpm': ['scpm.ra', 'scpm.dec', 'scpm.distance',
'scpm.pm_ra_cosdec', 'scpm.pm_dec'],
'scpmrv': ['scpmrv.ra', 'scpmrv.dec', 'scpmrv.distance',
'scpmrv.pm_ra_cosdec', 'scpmrv.pm_dec',
'scpmrv.radial_velocity'],
'scrv': ['scrv.ra', 'scrv.dec', 'scrv.distance',
'scrv.radial_velocity'],
'sd': ['sd.d_lon_coslat', 'sd.d_lat', 'sd.d_distance'],
'sr': ['sr.lon', 'sr.lat', 'sr.distance'],
'srd': ['srd.lon', 'srd.lat', 'srd.distance',
'srd.differentials.s.d_lon_coslat',
'srd.differentials.s.d_lat',
'srd.differentials.s.d_distance'],
'su': ['su.i', 'su.f.p1', 'su.f.p0'],
'su2': ['su2.name', 'su2.f'],
'tm': ['tm.jd1', 'tm.jd2'],
'tm2': ['tm2.jd1', 'tm2.jd2'],
'tm3': ['tm3.jd1', 'tm3.jd2',
'tm3.location.x', 'tm3.location.y', 'tm3.location.z'],
}
serialized_names = {name: non_trivial_names.get(name, [name])
for name in sorted(mixin_cols)}
|
9c7f66dccf082e9e5e84605e7da12645e82056bb977a25f0d6b9bfc3542e29ee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
from collections import OrderedDict
import numpy as np
from .base import IORegistryError, _UnifiedIORegistryBase
__all__ = ['UnifiedIORegistry', 'UnifiedInputRegistry', 'UnifiedOutputRegistry']
PATH_TYPES = (str, os.PathLike) # TODO! include bytes
def _expand_user_in_args(args):
# Conservatively attempt to apply `os.path.expanduser` to the first
# argument, which can be either a path or the contents of a table.
if len(args) and isinstance(args[0], PATH_TYPES):
ex_user = os.path.expanduser(args[0])
if ex_user != args[0] and os.path.exists(os.path.dirname(ex_user)):
args = (ex_user,) + args[1:]
return args
# -----------------------------------------------------------------------------
class UnifiedInputRegistry(_UnifiedIORegistryBase):
"""Read-only Unified Registry.
.. versionadded:: 5.0
Examples
--------
First let's start by creating a read-only registry.
.. code-block:: python
>>> from astropy.io.registry import UnifiedInputRegistry
>>> read_reg = UnifiedInputRegistry()
There is nothing in this registry. Let's make a reader for the
:class:`~astropy.table.Table` class::
from astropy.table import Table
def my_table_reader(filename, some_option=1):
# Read in the table by any means necessary
return table # should be an instance of Table
Such a function can then be registered with the I/O registry::
read_reg.register_reader('my-table-format', Table, my_table_reader)
Note that we CANNOT then read in a table with::
d = Table.read('my_table_file.mtf', format='my-table-format')
    Why? Because ``Table.read`` uses Astropy's default global registry, and
    this is a separate registry.
    Instead, read via the ``read`` method on this registry::
d = read_reg.read(Table, 'my_table_file.mtf', format='my-table-format')
"""
def __init__(self):
super().__init__() # set _identifiers
self._readers = OrderedDict()
self._registries["read"] = dict(attr="_readers", column="Read")
self._registries_order = ("read", "identify")
# =========================================================================
# Read methods
def register_reader(self, data_format, data_class, function, force=False,
priority=0):
"""
Register a reader function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when reading.
data_class : class
The class of the object that the reader produces.
function : function
The function to read in a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the reader, used to compare possible formats when
trying to determine the best reader to use. Higher priorities are
preferred over lower priorities, with the default priority being 0
(negative numbers are allowed though).
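
        Examples
        --------
        A hedged sketch (the format name and reader function are made up for
        illustration)::

            from astropy.table import Table

            def my_reader(filename):
                ...  # read the data by any means necessary, return a Table

            registry = UnifiedInputRegistry()
            registry.register_reader('my-format', Table, my_reader)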
"""
        if (data_format, data_class) not in self._readers or force:
self._readers[(data_format, data_class)] = function, priority
else:
raise IORegistryError("Reader for format '{}' and class '{}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'read')
def unregister_reader(self, data_format, data_class):
"""
Unregister a reader function
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that the reader produces.
"""
if (data_format, data_class) in self._readers:
self._readers.pop((data_format, data_class))
else:
raise IORegistryError("No reader defined for format '{}' and class '{}'"
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'read')
def get_reader(self, data_format, data_class):
"""Get reader for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
reader : callable
The registered reader function for this format and class.
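
        Examples
        --------
        A hedged sketch (assumes a reader was registered under
        ``'my-format'`` as in `register_reader`)::

            reader = registry.get_reader('my-format', Table)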
"""
readers = [(fmt, cls) for fmt, cls in self._readers if fmt == data_format]
for reader_format, reader_class in readers:
if self._is_best_match(data_class, reader_class, readers):
return self._readers[(reader_format, reader_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, 'Read')
raise IORegistryError(
"No reader defined for format '{}' and class '{}'.\n\nThe "
"available formats are:\n\n{}".format(
data_format, data_class.__name__, format_table_str))
def read(self, cls, *args, format=None, cache=False, **kwargs):
"""
Read in data.
Parameters
----------
cls : class
*args
The arguments passed to this method depend on the format.
format : str or None
cache : bool
Whether to cache the results of reading in the data.
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered reader.
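
        Examples
        --------
        A hedged sketch, continuing the class-level example (``Table`` and
        the reader are set up there; the file name is hypothetical)::

            d = read_reg.read(Table, 'my_table_file.mtf',
                              format='my-table-format')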
"""
ctx = None
try:
# Expand a tilde-prefixed path if present in args[0]
args = _expand_user_in_args(args)
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES) and not os.path.isdir(args[0]):
from astropy.utils.data import get_readable_fileobj
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
try:
ctx = get_readable_fileobj(args[0], encoding='binary', cache=cache)
fileobj = ctx.__enter__()
except OSError:
raise
except Exception:
fileobj = None
else:
args = [fileobj] + list(args[1:])
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = self._get_valid_format(
'read', cls, path, fileobj, args, kwargs)
reader = self.get_reader(format, cls)
data = reader(*args, **kwargs)
if not isinstance(data, cls):
# User has read with a subclass where only the parent class is
# registered. This returns the parent class, so try coercing
# to desired subclass.
try:
data = cls(data)
except Exception:
raise TypeError('could not convert reader output to {} '
'class.'.format(cls.__name__))
finally:
if ctx is not None:
ctx.__exit__(*sys.exc_info())
return data
# -----------------------------------------------------------------------------
class UnifiedOutputRegistry(_UnifiedIORegistryBase):
"""Write-only Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._writers = OrderedDict()
self._registries["write"] = dict(attr="_writers", column="Write")
self._registries_order = ("write", "identify", )
# =========================================================================
# Write Methods
def register_writer(self, data_format, data_class, function, force=False, priority=0):
"""
Register a table writer function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when writing.
data_class : class
The class of the object that can be written.
function : function
The function to write out a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the writer, used to compare possible formats when trying
to determine the best writer to use. Higher priorities are preferred
over lower priorities, with the default priority being 0 (negative
numbers are allowed though).
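
        Examples
        --------
        A hedged sketch (the format name and writer function are made up for
        illustration)::

            from astropy.table import Table

            def my_writer(table, filename):
                ...  # write the table by any means necessary

            registry = UnifiedOutputRegistry()
            registry.register_writer('my-format', Table, my_writer)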
"""
        if (data_format, data_class) not in self._writers or force:
self._writers[(data_format, data_class)] = function, priority
else:
raise IORegistryError("Writer for format '{}' and class '{}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'write')
def unregister_writer(self, data_format, data_class):
"""
Unregister a writer function
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be written.
"""
if (data_format, data_class) in self._writers:
self._writers.pop((data_format, data_class))
else:
raise IORegistryError("No writer defined for format '{}' and class '{}'"
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'write')
def get_writer(self, data_format, data_class):
"""Get writer for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
writer : callable
The registered writer function for this format and class.
"""
writers = [(fmt, cls) for fmt, cls in self._writers if fmt == data_format]
for writer_format, writer_class in writers:
if self._is_best_match(data_class, writer_class, writers):
return self._writers[(writer_format, writer_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, 'Write')
raise IORegistryError(
"No writer defined for format '{}' and class '{}'.\n\nThe "
"available formats are:\n\n{}".format(
data_format, data_class.__name__, format_table_str))
def write(self, data, *args, format=None, **kwargs):
"""
Write out data.
Parameters
----------
data : object
The data to write.
*args
The arguments passed to this method depend on the format.
format : str or None
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered writer. Most often `None`.
.. versionadded:: 4.3
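
        Examples
        --------
        A hedged sketch, continuing the ``register_writer`` example
        (``my_table`` is a Table instance; the file name is hypothetical)::

            registry.write(my_table, 'output.myfmt', format='my-format')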
"""
# Expand a tilde-prefixed path if present in args[0]
args = _expand_user_in_args(args)
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
fileobj = None
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = self._get_valid_format(
'write', data.__class__, path, fileobj, args, kwargs)
writer = self.get_writer(format, data.__class__)
return writer(data, *args, **kwargs)
# -----------------------------------------------------------------------------
class UnifiedIORegistry(UnifiedInputRegistry, UnifiedOutputRegistry):
"""Unified I/O Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._registries_order = ("read", "write", "identify")
def get_formats(self, data_class=None, readwrite=None):
"""
Get the list of registered I/O formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class, optional
Filter readers/writer to match data class (default = all classes).
readwrite : str or None, optional
Search only for readers (``"Read"``) or writers (``"Write"``).
If None search for both. Default is None.
.. versionadded:: 1.3
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
"""
return super().get_formats(data_class, readwrite)
|
b8e2e2436b870943e7f7392b25cc0409994a788f7ee972d0f64c2448d0c4396c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsheader`` is a command line script based on astropy.io.fits for printing
the header(s) of one or more FITS file(s) to the standard output in a human-
readable format.
Example uses of fitsheader:
1. Print the header of all the HDUs of a .fits file::
$ fitsheader filename.fits
2. Print the header of the third and fifth HDU extension::
$ fitsheader --extension 3 --extension 5 filename.fits
3. Print the header of a named extension, e.g. select the HDU containing
keywords EXTNAME='SCI' and EXTVER='2'::
$ fitsheader --extension "SCI,2" filename.fits
4. Print only specific keywords::
$ fitsheader --keyword BITPIX --keyword NAXIS filename.fits
5. Print keywords NAXIS, NAXIS1, NAXIS2, etc using a wildcard::
$ fitsheader --keyword NAXIS* filename.fits
6. Dump the header keywords of all the files in the current directory into a
machine-readable csv file::
$ fitsheader --table ascii.csv *.fits > keywords.csv
7. Specify hierarchical keywords with the dotted or spaced notation::
$ fitsheader --keyword ESO.INS.ID filename.fits
$ fitsheader --keyword "ESO INS ID" filename.fits
8. Compare the headers of different fits files, following ESO's ``fitsort``
format::
$ fitsheader --fitsort --extension 0 --keyword ESO.INS.ID *.fits
9. Same as above, sorting the output along a specified keyword::
$ fitsheader -f -s DATE-OBS -e 0 -k DATE-OBS -k ESO.INS.ID *.fits
10. Sort first by OBJECT, then DATE-OBS::
$ fitsheader -f -s OBJECT -s DATE-OBS *.fits
Note that compressed images (HDUs of type
:class:`~astropy.io.fits.CompImageHDU`) really have two headers: a real
BINTABLE header to describe the compressed data, and a fake IMAGE header
representing the image that was compressed. Astropy returns the latter by
default. You must supply the ``--compressed`` option if you require the real
header that describes the compression.
With Astropy installed, please run ``fitsheader --help`` to see the full usage
documentation.
"""
import sys
import argparse
import numpy as np
from astropy.io import fits
from astropy import log, __version__
DESCRIPTION = """
Print the header(s) of a FITS file. Optional arguments allow the desired
extension(s), keyword(s), and output format to be specified.
Note that in the case of a compressed image, the decompressed header is
shown by default.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitsheader
for further documentation.
""".strip()
class ExtensionNotFoundException(Exception):
"""Raised if an HDU extension requested by the user does not exist."""
pass
class HeaderFormatter:
"""Class to format the header(s) of a FITS file for display by the
`fitsheader` tool; essentially a wrapper around a `HDUList` object.
Example usage:
fmt = HeaderFormatter('/path/to/file.fits')
print(fmt.parse(extensions=[0, 3], keywords=['NAXIS', 'BITPIX']))
Parameters
----------
filename : str
Path to a single FITS file.
verbose : bool
Verbose flag, to show more information about missing extensions,
keywords, etc.
Raises
------
OSError
If `filename` does not exist or cannot be read.
"""
def __init__(self, filename, verbose=True):
self.filename = filename
self.verbose = verbose
self._hdulist = fits.open(filename)
def parse(self, extensions=None, keywords=None, compressed=False):
"""Returns the FITS file header(s) in a readable format.
Parameters
----------
extensions : list of int or str, optional
Format only specific HDU(s), identified by number or name.
The name can be composed of the "EXTNAME" or "EXTNAME,EXTVER"
keywords.
keywords : list of str, optional
Keywords for which the value(s) should be returned.
If not specified, then the entire header is returned.
compressed : bool, optional
If True, shows the header describing the compression, rather than
the header obtained after decompression. (Affects FITS files
containing `CompImageHDU` extensions only.)
Returns
-------
formatted_header : str or astropy.table.Table
Traditional 80-char wide format in the case of `HeaderFormatter`;
an Astropy Table object in the case of `TableHeaderFormatter`.
"""
# `hdukeys` will hold the keys of the HDUList items to display
if extensions is None:
hdukeys = range(len(self._hdulist)) # Display all by default
else:
hdukeys = []
for ext in extensions:
try:
# HDU may be specified by number
hdukeys.append(int(ext))
except ValueError:
# The user can specify "EXTNAME" or "EXTNAME,EXTVER"
parts = ext.split(',')
if len(parts) > 1:
extname = ','.join(parts[0:-1])
extver = int(parts[-1])
hdukeys.append((extname, extver))
else:
hdukeys.append(ext)
# Having established which HDUs the user wants, we now format these:
return self._parse_internal(hdukeys, keywords, compressed)
def _parse_internal(self, hdukeys, keywords, compressed):
"""The meat of the formatting; in a separate method to allow overriding.
"""
result = []
for idx, hdu in enumerate(hdukeys):
try:
cards = self._get_cards(hdu, keywords, compressed)
except ExtensionNotFoundException:
continue
if idx > 0: # Separate HDUs by a blank line
result.append('\n')
result.append(f'# HDU {hdu} in {self.filename}:\n')
for c in cards:
result.append(f'{c}\n')
return ''.join(result)
def _get_cards(self, hdukey, keywords, compressed):
"""Returns a list of `astropy.io.fits.card.Card` objects.
This function will return the desired header cards, taking into
account the user's preference to see the compressed or uncompressed
version.
Parameters
----------
hdukey : int or str
Key of a single HDU in the HDUList.
keywords : list of str, optional
Keywords for which the cards should be returned.
compressed : bool, optional
If True, shows the header describing the compression.
Raises
------
ExtensionNotFoundException
If the hdukey does not correspond to an extension.
"""
# First we obtain the desired header
try:
if compressed:
# In the case of a compressed image, return the header before
# decompression (not the default behavior)
header = self._hdulist[hdukey]._header
else:
header = self._hdulist[hdukey].header
except (IndexError, KeyError):
message = f'{self.filename}: Extension {hdukey} not found.'
if self.verbose:
log.warning(message)
raise ExtensionNotFoundException(message)
if not keywords: # return all cards
cards = header.cards
else: # specific keywords are requested
cards = []
for kw in keywords:
try:
crd = header.cards[kw]
if isinstance(crd, fits.card.Card): # Single card
cards.append(crd)
else: # Allow for wildcard access
cards.extend(crd)
except KeyError: # Keyword does not exist
if self.verbose:
log.warning('{filename} (HDU {hdukey}): '
'Keyword {kw} not found.'.format(
filename=self.filename,
hdukey=hdukey,
kw=kw))
return cards
def close(self):
self._hdulist.close()
class TableHeaderFormatter(HeaderFormatter):
"""Class to convert the header(s) of a FITS file into a Table object.
The table returned by the `parse` method will contain four columns:
filename, hdu, keyword, and value.
Subclassed from HeaderFormatter, which contains the meat of the formatting.
"""
def _parse_internal(self, hdukeys, keywords, compressed):
"""Method called by the parse method in the parent class."""
tablerows = []
for hdu in hdukeys:
try:
for card in self._get_cards(hdu, keywords, compressed):
tablerows.append({'filename': self.filename,
'hdu': hdu,
'keyword': card.keyword,
'value': str(card.value)})
except ExtensionNotFoundException:
pass
if tablerows:
from astropy import table
return table.Table(tablerows)
return None
def print_headers_traditional(args):
"""Prints FITS header(s) using the traditional 80-char format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
for idx, filename in enumerate(args.filename): # support wildcards
if idx > 0 and not args.keyword:
print() # print a newline between different files
formatter = None
try:
formatter = HeaderFormatter(filename)
print(formatter.parse(args.extensions,
args.keyword,
args.compressed), end='')
except OSError as e:
log.error(str(e))
finally:
if formatter:
formatter.close()
def print_headers_as_table(args):
"""Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename)
tbl = formatter.parse(args.extensions,
args.keyword,
args.compressed)
if tbl:
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
from astropy import table
resulting_table = table.vstack(tables)
# Print the string representation of the concatenated table
resulting_table.write(sys.stdout, format=args.table)
def print_headers_as_comparison(args):
"""Prints FITS header(s) with keywords as columns.
This follows the dfits+fitsort format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
from astropy import table
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename, verbose=False)
tbl = formatter.parse(args.extensions,
args.keyword,
args.compressed)
if tbl:
# Remove empty keywords
tbl = tbl[np.where(tbl['keyword'] != '')]
else:
tbl = table.Table([[filename]], names=('filename',))
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
resulting_table = table.vstack(tables)
# If we obtained more than one hdu, merge hdu and keywords columns
hdus = resulting_table['hdu']
if np.ma.isMaskedArray(hdus):
hdus = hdus.compressed()
if len(np.unique(hdus)) > 1:
for tab in tables:
new_column = table.Column(
[f"{row['hdu']}:{row['keyword']}" for row in tab])
tab.add_column(new_column, name='hdu+keyword')
keyword_column_name = 'hdu+keyword'
else:
keyword_column_name = 'keyword'
    # Build a single-row table per file, with one column per keyword value
final_tables = []
for tab in tables:
final_table = [table.Column([tab['filename'][0]], name='filename')]
if 'value' in tab.colnames:
for row in tab:
if row['keyword'] in ('COMMENT', 'HISTORY'):
continue
final_table.append(table.Column([row['value']],
name=row[keyword_column_name]))
final_tables.append(table.Table(final_table))
final_table = table.vstack(final_tables)
# Sort if requested
if args.sort:
final_table.sort(args.sort)
    # Print the full table, with one column per keyword
final_table.pprint(max_lines=-1, max_width=-1)
def main(args=None):
"""This is the main function called by the `fitsheader` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--version', action='version',
version=f'%(prog)s {__version__}')
parser.add_argument('-e', '--extension', metavar='HDU',
action='append', dest='extensions',
help='specify the extension by name or number; '
'this argument can be repeated '
'to select multiple extensions')
parser.add_argument('-k', '--keyword', metavar='KEYWORD',
action='append', type=str,
help='specify a keyword; this argument can be '
'repeated to select multiple keywords; '
'also supports wildcards')
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument('-t', '--table',
nargs='?', default=False, metavar='FORMAT',
help='print the header(s) in machine-readable table '
'format; the default format is '
'"ascii.fixed_width" (can be "ascii.csv", '
'"ascii.html", "ascii.latex", "fits", etc)')
mode_group.add_argument('-f', '--fitsort', action='store_true',
help='print the headers as a table with each unique '
'keyword in a given column (fitsort format) ')
parser.add_argument('-s', '--sort', metavar='SORT_KEYWORD',
action='append', type=str,
help='sort output by the specified header keywords, '
'can be repeated to sort by multiple keywords; '
'Only supported with -f/--fitsort')
parser.add_argument('-c', '--compressed', action='store_true',
help='for compressed image data, '
'show the true header which describes '
'the compression rather than the data')
parser.add_argument('filename', nargs='+',
help='path to one or more files; '
'wildcards are supported')
args = parser.parse_args(args)
# If `--table` was used but no format specified,
# then use ascii.fixed_width by default
if args.table is None:
args.table = 'ascii.fixed_width'
if args.sort:
args.sort = [key.replace('.', ' ') for key in args.sort]
if not args.fitsort:
log.error('Sorting with -s/--sort is only supported in conjunction with -f/--fitsort')
# 2: Unix error convention for command line syntax
sys.exit(2)
if args.keyword:
args.keyword = [key.replace('.', ' ') for key in args.keyword]
# Now print the desired headers
try:
if args.table:
print_headers_as_table(args)
elif args.fitsort:
print_headers_as_comparison(args)
else:
print_headers_traditional(args)
except OSError:
# A 'Broken pipe' OSError may occur when stdout is closed prematurely,
# eg. when calling `fitsheader file.fits | head`. We let this pass.
pass
|
020c8bc8242eedea536738346110383c5d3637a1445f9add5405e0579e67b7f5 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .base import DELAYED, _ValidHDU, ExtensionHDU
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from astropy.io.fits.column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_TO_ATTRIBUTE,
ATTRIBUTE_TO_KEYWORD, TDEF_RE, Column, ColDefs,
_AsciiColDefs, _FormatP, _FormatQ, _makep,
_parse_tformat, _scalar_to_format, _convert_format,
_cmp_recformats)
from astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import _is_int, _str_to_num
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = ' '
lineterminator = '\n'
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
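# A writer or reader using this dialect is constructed like any other csv
# dialect, e.g. csv.writer(fileobj, dialect=FITSTableDumpDialect), as done in
# BinTableHDU._dump_data and BinTableHDU._load_data below.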
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(cls, columns, header=None, nrows=0, fill=False,
character_as_bytes=False, **kwargs):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
`numpy.ndarray` or `numpy.recarray` object, return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs` -like
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
If these columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
            An optional `Header` object to instantiate the new HDU with. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
            If `True`, fill all cells with zeros or blanks. If `False`, copy
            the data from the input; undefined cells will still be filled
            with zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
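
        Examples
        --------
        A minimal sketch; the column names, formats, and values are purely
        illustrative::

            >>> from astropy.io import fits
            >>> import numpy as np
            >>> c1 = fits.Column(name='a', format='J', array=np.array([1, 2]))
            >>> c2 = fits.Column(name='b', format='D', array=np.array([1.5, 2.5]))
            >>> hdu = fits.BinTableHDU.from_columns([c1, c2])
            >>> hdu.columns.names
            ['a', 'b']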
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(coldefs, nrows=nrows, fill=fill,
character_as_bytes=character_as_bytes)
hdu = cls(data=data, header=header, character_as_bytes=character_as_bytes, **kwargs)
coldefs._add_listener(hdu)
return hdu
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
        Table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (any(type(r) in (_FormatP, _FormatQ)
for r in columns._recformats) and
self._data_size is not None and
self._data_size > self._theap):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8,
self._data_offset)
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
data = raw_data[:tbsize].view(dtype=columns.dtype,
type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype,
self._data_offset)
if raw_data is None:
                # This can happen when a brand new table HDU is being created
                # and no data has been assigned to the columns, in which case
                # we just return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder('>')
# hack to enable pseudo-uint support
data._uint = self._uint
        # pass the heap data location (datLoc), needed for P-format columns
data._heapoffset = self._theap
data._heapsize = self._header['PCOUNT']
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_load_data(self):
"""Load the data if asked to."""
if not self._data_loaded:
self.data
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns, nrows=self._nrows, fill=False,
character_as_bytes=self._character_as_bytes
)
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns, nrows=self._nrows, fill=False,
character_as_bytes=self._character_as_bytes
)
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
        The version of the HDU; this will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
super().__init__(data=data, header=header, name=name, ver=ver)
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
                raise ValueError('No header to set up HDU.')
            # If the file is being read for the first time, there is no need
            # to copy the header; keep it unchanged
else:
self._header = header
else:
# construct a list of cards of minimal header
cards = [
('XTENSION', self._extension, self._ext_comment),
('BITPIX', 8, 'array data type'),
('NAXIS', 2, 'number of array dimensions'),
('NAXIS1', 0, 'length of dimension 1'),
('NAXIS2', 0, 'length of dimension 2'),
('PCOUNT', 0, 'number of group parameters'),
('GCOUNT', 1, 'number of groups'),
('TFIELDS', 0, 'number of table fields')]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
# may get modified. the data is still a "view" (for now)
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ', '.join(x + 'n' for x in sorted(future_ignore))
warnings.warn("The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys), AstropyDeprecationWarning)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header['NAXIS1'] = self.data._raw_itemsize
self._header['NAXIS2'] = self.data.shape[0]
self._header['TFIELDS'] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if 'data' in self.__dict__:
if self.__dict__['data'] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ','.join(self.columns._recformats)
data = np.rec.array(None, formats=formats,
names=self.columns.names,
shape=0)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
                # This would be the place, if the input data was for an ASCII
                # table and this is a binary table, or vice versa, to convert
                # the data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
if 'data' in self.__dict__:
self.columns._remove_listener(self.__dict__['data'])
self.__dict__['data'] = data
self.columns = self.data.columns
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get('NAXIS2', 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
size = self._header['NAXIS1'] * self._header['NAXIS2']
return self._header.get('THEAP', size)
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set('NAXIS1', self.data._raw_itemsize, after='NAXIS')
self._header.set('NAXIS2', self.data.shape[0], after='NAXIS1')
self._header.set('TFIELDS', len(self.columns), after='GCOUNT')
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(),
header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(
update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header['TFIELDS'] = len(self.data._coldefs)
self._header['NAXIS2'] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
heapstart = self._header.get('THEAP', tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header['PCOUNT'] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat,
max=_max)
self._header['TFORM' + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option='warn'):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
if (len(self._header) > 1):
if not (isinstance(self._header[0], str) and
self._header[0].rstrip() == self._extension):
err_text = 'The XTENSION keyword must match the HDU type.'
fix_text = f'Converted the XTENSION keyword to {self._extension}.'
def fix(header=self._header):
header[0] = (self._extension, self._ext_comment)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
self.req_cards('NAXIS', None, lambda v: (v == 2), 2, option, errs)
self.req_cards('BITPIX', None, lambda v: (v == 8), 8, option, errs)
self.req_cards('TFIELDS', 7,
lambda v: (_is_int(v) and v >= 0 and v <= 999), 0,
option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TFORM' + str(idx + 1), None, None, None, option,
errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
nrows = self._header['NAXIS2']
ncols = self._header['TFIELDS']
format = ', '.join([self._header['TFORM' + str(j + 1)]
for j in range(ncols)])
format = f'[{format}]'
dims = f"{nrows}R x {ncols}C"
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(self, column, col_idx, attr,
old_value, new_value):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(before_keyword, (keyword, new_value),
after=True)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1:]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword,
(keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
        If specified, only clear keywords for the given table index (shifting
        up keywords for any other columns); the index is zero-based.
        Otherwise, keywords for all columns are cleared.
"""
# First collect all the table structure related keyword in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
continue
num = int(match.group('num')) - 1 # convert to zero-base
table_keywords.append((idx, match.group(0), base_keyword,
num))
# First delete
rev_sorted_idx_0 = sorted(table_keywords, key=operator.itemgetter(0),
reverse=True)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value,
old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if 'TFIELDS' in self._header:
self._header['TFIELDS'] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
        The version of the HDU; this will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
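
    Examples
    --------
    A minimal sketch of building an ASCII table HDU; the column name and
    format are illustrative::

        >>> from astropy.io import fits
        >>> col = fits.Column(name='x', format='I10', array=[1, 2, 3])
        >>> hdu = fits.TableHDU.from_columns([col])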
"""
_extension = 'TABLE'
_ext_comment = 'ASCII table extension'
_padding_byte = ' '
_columns_type = _AsciiColDefs
__format_RE = re.compile(
r'(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?')
def __init__(self, data=None, header=None, name=None, ver=None, character_as_bytes=False):
super().__init__(data, header, name=name, ver=ver, character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == 'XTENSION' and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
        names = list(columns.names)
# determine if there are duplicate field names and if there
# are throw an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError(f"Duplicate field names: {dup}")
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = 'S' + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header['NAXIS1'] > itemsize:
data_type = 'S' + str(columns.spans[idx] +
self._header['NAXIS1'] - itemsize)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b' ',
dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option='warn'):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards('PCOUNT', None, lambda v: (v == 0), 0, option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TBCOL' + str(idx + 1), None, _is_int, None, option,
errs)
return errs
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
        The version of the HDU; this will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
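
    Examples
    --------
    A minimal sketch constructing the HDU directly from an
    `~astropy.table.Table` (the table contents are illustrative)::

        >>> from astropy.table import Table
        >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
        >>> hdu = BinTableHDU(t, name='MYTABLE')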
"""
_extension = 'BINTABLE'
_ext_comment = 'binary table extension'
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
from astropy.table import Table
if isinstance(data, Table):
from astropy.io.fits.convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(data, header, name=name, uint=uint, ver=ver,
character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return (card.keyword == 'XTENSION' and
xtension in (cls._extension, 'A3DTABLE'))
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
            # Now add in the heap data to the checksum (we can skip any gap
            # between the table and the heap since it's all zeros and doesn't
            # contribute to the checksum)
if data._get_raw_data() is None:
                # This block is still needed because
                # test_variable_length_table_data leads to ._get_raw_data
                # returning None, which means _get_heap_data doesn't work.
                # That happens when the data is loaded in memory rather
                # than being read lazily from disk.
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
else:
csum = self._compute_checksum(data._get_heap_data(), csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
            # Write out the heap of variable-length array columns; this has
            # to be done after the "regular" data is written (above).
            # To avoid a bug in the Lustre filesystem client, don't
            # write 0-byte objects.
if data._gap > 0:
fileobj.write((data._gap * '\0').encode('ascii'))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
# that the data pointers appear in the column (regardless
# if that data pointer has a different, previous heap
# offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx],
_FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx)
for idx in range(len(self.data.columns))]
        # Creating Record objects is expensive (as in
        # `for row in self.data:`), so instead we just iterate over the row
        # indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == 'U':
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i+1:])
item = np.char.encode(item, 'ascii')
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j+1:])
# Fix padding problem (see #5296).
padding = '\x00'*(field_width - item_length)
fileobj.write(padding.encode('ascii'))
_tdump_file_format = textwrap.dedent("""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
followed by a blank. Floating point data is output right justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
              This format does *not* support variable length arrays using the
              'Q' format due to difficult-to-overcome ambiguities. What this
              means is that this file format cannot support VLA columns in
              tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
          definitions for one column in the table. The line is broken up into
          eight 16-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
""")
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : path-like or file-like, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`, no
column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
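
        Examples
        --------
        A sketch of a typical call; the file names are illustrative::

            hdu.dump('table_data.txt', cdfile='table_cols.txt',
                     hfile='table_header.txt')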
"""
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, str):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(' '.join([f"File '{f}' already exists."
for f in exist])+" If you mean to "
"replace the file(s) "
"then use the argument "
"'overwrite=True'.")
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep='\n', endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace('\n', '\n ')
def load(cls, datafile, cdfile=None, hfile=None, replace=False,
header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
        When absent, the column definitions and/or header parameters are taken
from the header object given in the header argument; otherwise sensible
defaults are inferred (though this mode is not recommended).
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this objects header.
replace : bool, optional
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : `~astropy.io.fits.Header`, optional
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
supersedes the keywords from hfile, which is only used to update
            values not present in this Header, unless ``replace=True``, in
            which case this Header's values are completely replaced with the
            values from hfile.
Notes
-----
The primary use for the `load` method is to allow the input of ASCII
data that was edited in a standard text editor of the table data and
parameters. The `dump` method can be used to create the initial ASCII
files.
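
        Examples
        --------
        A sketch of recreating an HDU from files written by `dump`; the file
        names are illustrative::

            hdu = BinTableHDU.load('table_data.txt', cdfile='table_cols.txt',
                                   hfile='table_header.txt')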
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(Header.fromtextfile(hfile), update=True,
update_first=True)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace('\n', '\n ')
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + '.txt'
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == 'S':
itemsize = int(format[1:])
return '{:{size}}'.format(val, size=itemsize)
elif format in np.typecodes['AllInteger']:
# output integer
return f'{val:21d}'
elif format in np.typecodes['Complex']:
return f'{val.real:21.15g}+{val.imag:.15g}j'
elif format in np.typecodes['Float']:
# output floating point
return f'{val:#21.15g}'
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append('VLA_Length=')
line.append(f'{len(row[column.name]):21d}')
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == 'V':
array_format = dtype.base.char
if array_format == 'S':
array_format += str(dtype.itemsize)
if dtype.char == 'V':
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name],
array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ['disp', 'unit', 'dim', 'null', 'bscale', 'bzero']
line += ['{!s:16s}'.format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)]
fileobj.write(' '.join(line))
fileobj.write('\n')
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'r')
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == 'VLA_Length=':
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(np.recarray(shape=1, dtype=dtype),
nrows=nrows, fill=True)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)):]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY['L']:
return bool(int(val))
elif recformats[col] == FITS2NUMPY['M']:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == 'VLA_Length=':
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
data[row][col][:] = line[idx:idx + vla_len]
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [format_value(col, val)
for val in line[slice_]]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'r')
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ['name', 'format', 'disp', 'unit', 'dim']:
kwargs[key] = words.pop(0).replace('""', '')
for key in ['null', 'bscale', 'bzero']:
word = words.pop(0).replace('""', '')
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
        # must use field_dtype.base here since for multi-element dtypes,
        # the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (not isinstance(c, chararray.chararray) and
c.itemsize > 1 and c.dtype.str[0] in swap_types):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({'names': names,
'formats': formats,
'offsets': offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
|